GreatSQL社区

搜索

[已解决] 数据库存在大量:Waiting in connection_control plugin

313 4 2024-9-24 18:22
本帖最后由 驭无殇1998 于 2024-9-25 09:01 编辑

greatsql版本:8.0.32-26

服务器:centos7.6

今天在部署后端服务的时候发现,MySQL连接数一下就耗尽了,查看进程,发现有大量的“Waiting in connection_control plugin”状态的连接,并且在不断新增





配置文件:

[client]
user = mysql
socket = /data/greatsql/run/mysql.sock
[mysql]
loose-skip-binary-as-hex
prompt = "(\\D)[\\u@GreatSQL][\\d]>"
no-auto-rehash
[mysqld]
user    = mysql
port    = 3306
server_id = 79
basedir = /usr/local/greatsql
datadir = /data/greatsql/data
socket = /data/greatsql/run/mysql.sock
pid-file = /data/greatsql/run/mysql.pid
character-set-server = UTF8MB4
skip_name_resolve = ON
default_time_zone = "+8:00"
bind_address = "0.0.0.0"
secure_file_priv = /data/greatsql

# Performance
lock_wait_timeout = 3600
open_files_limit    = 65535
back_log = 1024
max_connections = 5000
max_connect_errors = 1000000
table_open_cache = 1024
table_definition_cache = 1024
thread_stack = 512K
sort_buffer_size = 4M
join_buffer_size = 4M
read_buffer_size = 8M
read_rnd_buffer_size = 4M
bulk_insert_buffer_size = 64M
thread_cache_size = 768
interactive_timeout = 600
wait_timeout = 300
tmp_table_size = 32M
max_heap_table_size = 32M
max_allowed_packet = 64M
net_buffer_shrink_interval = 180
sql_generate_invisible_primary_key = ON
loose-lock_ddl_polling_mode = ON
loose-lock_ddl_polling_runtime = 200

# 口令策略
#validate-password=FORCE_PLUS_PERMANENT
validate_password.policy = 1
validate_password.mixed_case_count = 1
validate_password.number_count = 1
validate_password.special_char_count = 1
# 密码有效期
default_password_lifetime=90
# 登录失败策略
plugin-load-add=connection_control.so
connection-control=FORCE_PLUS_PERMANENT
connection-control-failed-login-attempts=FORCE_PLUS_PERMANENT
connection_control_failed_connections_threshold = 5
connection_control_min_connection_delay = 600000
connection_control_max_connection_delay = 600000

#开启SSL认证
ssl-ca=/data/greatsql/data/ca.pem
ssl-cert=/data/greatsql/data/server-cert.pem
ssl-key=/data/greatsql/data/server-key.pem
#开启general日志
general_log = ON
general_log_file = /data/greatsql/logs/general.log
# Logs
log_timestamps = SYSTEM
log_error = /data/greatsql/logs/error.log
log_error_verbosity = 3
slow_query_log = ON
log_slow_extra = ON
slow_query_log_file = /data/greatsql/logs/slow.log
long_query_time = 2
log_queries_not_using_indexes = ON
log_throttle_queries_not_using_indexes = 60
min_examined_row_limit = 100
log_slow_admin_statements = ON
log_slow_replica_statements = ON
log_slow_verbosity = FULL
log_bin = /data/greatsql/data/binlog
log-bin-index= /data/greatsql/data/binlog.index
binlog_format = ROW
sync_binlog = 1
binlog_cache_size = 4M
max_binlog_cache_size = 2G
max_binlog_size = 1G
binlog_space_limit = 300G
binlog_rows_query_log_events = ON
binlog_expire_logs_seconds = 604800
binlog_checksum = CRC32
gtid_mode = ON
enforce_gtid_consistency = ON

# Replication
relay-log = relaylog
relay_log_recovery = ON
replica_parallel_type = LOGICAL_CLOCK
replica_parallel_workers = 16
binlog_transaction_dependency_tracking = WRITESET
replica_preserve_commit_order = ON
replica_checkpoint_period = 2
loose-rpl_read_binlog_speed_limit = 100

# MGR
loose-plugin_load_add = 'mysql_clone.so'
loose-plugin_load_add = 'group_replication.so'
loose-group_replication_group_name = "c20a1190-8366-4da4-acd5-82dec9cd0e91"
loose-group_replication_local_address = "greatsql_mgr_cluster_node2:33061"
loose-group_replication_group_seeds = "greatsql_mgr_cluster_node1:33061,greatsql_mgr_cluster_node2:33061,greatsql_mgr_cluster_node3:33061"
loose-group_replication_communication_stack = "XCOM"
loose-group_replication_recovery_use_ssl = OFF
loose-group_replication_ssl_mode = DISABLED
loose-group_replication_start_on_boot = OFF
loose-group_replication_bootstrap_group = OFF
loose-group_replication_exit_state_action = READ_ONLY
loose-group_replication_flow_control_mode = "DISABLED"
loose-group_replication_single_primary_mode = ON
loose-group_replication_enforce_update_everywhere_checks = OFF
loose-group_replication_majority_after_mode = ON
loose-group_replication_communication_max_message_size = 10M
loose-group_replication_arbitrator = OFF
loose-group_replication_single_primary_fast_mode = 1
loose-group_replication_request_time_threshold = 100
loose-group_replication_primary_election_mode = GTID_FIRST
loose-group_replication_unreachable_majority_timeout = 0
loose-group_replication_member_expel_timeout = 5
loose-group_replication_autorejoin_tries = 288
loose-group_replication_recovery_get_public_key = ON
loose-group_replication_donor_threshold = 100
report_host = "10.196.14.79"

# InnoDB
innodb_buffer_pool_size = 16G
innodb_buffer_pool_instances = 8
innodb_data_file_path = ibdata1:12M:autoextend
innodb_flush_log_at_trx_commit = 1
innodb_log_buffer_size = 32M
innodb_redo_log_capacity = 6G
innodb_doublewrite_files = 2
innodb_max_undo_log_size = 4G
innodb_io_capacity = 4000
innodb_io_capacity_max = 8000
innodb_open_files = 65535
innodb_flush_method = O_DIRECT
innodb_lru_scan_depth = 4000
innodb_lock_wait_timeout = 10
innodb_rollback_on_timeout = ON
innodb_print_all_deadlocks = ON
innodb_online_alter_log_max_size = 4G
innodb_print_ddl_logs = ON
innodb_status_file = ON
innodb_status_output = OFF
innodb_status_output_locks = ON
innodb_sort_buffer_size = 64M
innodb_adaptive_hash_index = OFF
innodb_numa_interleave = OFF
innodb_spin_wait_delay = 20
innodb_print_lock_wait_timeout_info = ON
innodb_change_buffering = none
kill_idle_transaction = 300
innodb_data_file_async_purge = ON

#innodb monitor settings
#innodb_monitor_enable = "module_innodb,module_server,module_dml,module_ddl,module_trx,module_os,module_purge,module_log,module_lock,module_buffer,module_index,module_ibuf_system,module_buffer_page,module_adaptive_hash"

#pfs settings
performance_schema = 1
#performance_schema_instrument = '%memory%=on'
performance_schema_instrument = '%lock%=on'



我当前的架构是使用的    greatsql+mysqlrouter+nginx+keepalived做高可用负载均衡,本来是想用lvs,但是云服务商不支持lvs自建。。。。。
nginx配置:

user  nginx;
worker_processes  auto;

error_log  /usr/local/nginx/logs/error.log error;

events {
    worker_connections  1024;
}
stream {
    # 全局配置
    preread_timeout        120s;
    proxy_connect_timeout  300s;
    proxy_protocol_timeout 120s;
    resolver_timeout       120s;
    proxy_timeout          300s;
    tcp_nodelay            on;
    # 设置日志格式
    log_format proxy '$remote_addr [$time_local] '
                  '$protocol $status $bytes_sent $bytes_received '
                  '$session_time "$upstream_addr" "$upstream_bytes_sent" '
                  '"$upstream_bytes_received" "$upstream_connect_time"';
    # 配置日志
    access_log /usr/local/nginx/logs/stream_access.log proxy;
    error_log  /usr/local/nginx/logs/stream_error.log error;



    upstream greatsql-read-services {
        hash $remote_addr consistent;
        server xxxxx:6446 weight=1 max_fails=2 fail_timeout=60s;
        server xxxxxxx:6446 weight=1 max_fails=2 fail_timeout=60s;
        server 1xxxxx:6446 weight=1 max_fails=2 fail_timeout=60s;
    }
    # TCP代理(GREATSQL)
    server {
            listen      6446 so_keepalive=on;
            proxy_connect_timeout  3600s;
            proxy_timeout          3600s;
            proxy_pass greatsql-read-services;
    }

}

全部回复(4)
yejr 2024-9-24 20:43:05
可以参考我以前整理的《叶问》专栏,需要合理设置 CONNECTION_CONTROL 相关的几个参数

https://mp.weixin.qq.com/s/Fkjuf8YpsJvWGIQBZHd69w
驭无殇1998 2024-9-25 09:00:57
yejr 发表于 2024-9-24 20:43
可以参考我以前整理的《叶问》专栏,需要合理设置 CONNECTION_CONTROL 相关的几个参数

https://mp.weixin. ...

这个是等保要求配置的。。。。。哎,我先注释,等系统稳定了再开启吧
yejr 2024-9-25 12:09:51
驭无殇1998 发表于 2024-9-25 09:00
这个是等保要求配置的。。。。。哎,我先注释,等系统稳定了再开启吧

失败次数阈值太小,delay的值又太高,所以就有大量的wait了
驭无殇1998 2024-9-25 16:30:01
yejr 发表于 2024-9-25 12:09
失败次数阈值太小,delay的值又太高,所以就有大量的wait了

等保就要求这么配置,哎
驭无殇1998

22

主题

0

博客

181

贡献

中级会员

Rank: 3Rank: 3

积分
297

勤学好问(铜)助人为乐(银)

合作电话:010-64087828

社区邮箱:greatsql@greatdb.com

社区公众号
社区小助手
QQ群
GMT+8, 2024-11-21 21:36 , Processed in 0.044351 second(s), 17 queries , Redis On.
快速回复 返回顶部 返回列表