GreatSQL configuration help
Our company's machine has 12 cores and 24G of RAM. Previously, MySQL ran with a Docker mem_limit of 512M. The initialization scripts include one table that needs 550,000 rows imported; that single table is about 70MB. The scripts sit in the docker-entrypoint-initdb.d directory.
Now, every time GreatSQL initializes, the SQL import breaks off when it reaches this table. How should this be configured? Which parameters are the key ones?
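Before tuning any parameters, it is worth confirming how the import is being terminated. A minimal check from the Docker host (a sketch; the container name "greatsql" is hypothetical, substitute your own):

docker inspect greatsql --format 'mem_limit={{.HostConfig.Memory}} oom_killed={{.State.OOMKilled}}'
docker logs greatsql 2>&1 | tail -n 50    # last output before the import stopped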
The current configuration was scaled down proportionally from a 128C/256G template, as follows:
# Total available server resources: 12C / 24G RAM
[client]
socket = /data/GreatSQL/mysql.sock

[mysql]
loose-skip-binary-as-hex
prompt="(\\D)[\\u@GreatSQL][\\d]>"
no-auto-rehash

[mysqld]
user = mysql
port = 3306
server_id = 1
basedir = /usr/
datadir = /data/GreatSQL
socket = /data/GreatSQL/mysql.sock
pid-file = mysql.pid
character-set-server = UTF8MB4
skip_name_resolve = ON
#If this server runs outside the UTC+8 time zone, adjust this parameter accordingly
default_time_zone = "+8:00"
bind_address = "0.0.0.0"
secure_file_priv = /data/GreatSQL
lower_case_table_names = 0
#performance settings
# Lock wait timeout (seconds)
lock_wait_timeout = 3600
# Maximum number of open files; lowered to suit a small-memory server
open_files_limit = 10240
# Maximum number of queued connection requests
back_log = 512
# Maximum number of connections
max_connections = 1024
# Maximum number of connection errors
max_connect_errors = 10000
# Table cache and table definition cache
table_open_cache = 512
table_definition_cache = 512
# Per-session sort and join buffer sizes
sort_buffer_size = 2M
join_buffer_size = 2M
# Per-session read buffers
read_buffer_size = 4M
read_rnd_buffer_size = 4M
# Bulk insert buffer size
bulk_insert_buffer_size = 32M
# Thread cache
thread_cache_size = 50
# Interactive and idle session timeouts (seconds)
interactive_timeout = 600
wait_timeout = 600
# In-memory temporary table size and max heap table size
tmp_table_size = 16M
max_heap_table_size = 32M
#log settings
# Log timestamps follow the system time zone
log_timestamps = SYSTEM
# Error log path
log_error = error.log
# Error log verbosity
log_error_verbosity = 3
# Slow query log switch
slow_query_log = ON
# Record extra fields in the slow query log
log_slow_extra = ON
# Slow query log file path
slow_query_log_file = slow.log
# Slow query time threshold (seconds)
long_query_time = 0.1
# Log queries that do not use indexes
log_queries_not_using_indexes = ON
# Throttle logging of queries that do not use indexes
log_throttle_queries_not_using_indexes = 6000
# Minimum number of examined rows before a query can be logged as slow
min_examined_row_limit = 1000
# Log slow administrative statements
log_slow_admin_statements = ON
# Log slow statements executed on the replica
log_slow_slave_statements = ON
# Slow query log verbosity; keep FULL
log_slow_verbosity = FULL
# Enable binary logging and set the file path
log_bin = /data/GreatSQL/binlog
# Binary log format: ROW
binlog_format = ROW
# Sync the binary log at every transaction commit (1 = safest)
sync_binlog = 1
# Per-session binary log cache size
binlog_cache_size = 2M
# Upper limit of the per-transaction binlog cache
max_binlog_cache_size = 2G
# Maximum size of a single binary log file
max_binlog_size = 1G
# Record the original row query in binlog events
binlog_rows_query_log_events = 1
# Binary log expiry (seconds; 604800 = 7 days)
binlog_expire_logs_seconds = 604800
# Binary log checksum method
binlog_checksum = CRC32
# Binlog ordered commits; OFF can improve write performance
binlog_order_commits = OFF
# Enable GTID and enforce consistency; keep original values
gtid_mode = ON
enforce_gtid_consistency = TRUE
#myisam settings
key_buffer_size = 32M
myisam_sort_buffer_size = 128M
relay_log_recovery = 1
# Parallel replication type
slave_parallel_type = LOGICAL_CLOCK
# Parallel replication workers; can be 1-2x the number of logical CPUs
slave_parallel_workers = 6
# Binary log transaction dependency tracking
binlog_transaction_dependency_tracking = WRITESET
# Preserve commit order on the replica
slave_preserve_commit_order = 1
# Replica checkpoint period
slave_checkpoint_period = 2
# InnoDB parallel query: do not force parallel execution for every statement
loose-force_parallel_execute = OFF
# Maximum degree of parallelism per SQL statement
loose-parallel_default_dop = 4
# Total parallel query threads in the system; can match the number of logical CPUs
loose-parallel_max_threads = 8
# Memory cap for parallel-execution leader and worker threads; roughly 5-10% of physical memory
loose-parallel_memory_limit = 1G
#mgr settings
# Load the clone and group replication plugins
loose-plugin_load_add = 'mysql_clone.so'
loose-plugin_load_add = 'group_replication.so'
# MGR group name
loose-group_replication_group_name = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa1"
loose-group_replication_local_address = "172.16.0.3:33051"
loose-group_replication_group_seeds = '172.16.0.2:33051,172.16.0.3:33051'
# Join the cluster automatically at server startup
loose-group_replication_start_on_boot = ON
# Do not bootstrap the cluster automatically on first start
loose-group_replication_bootstrap_group = OFF
# On failure, put the node into read-only mode
loose-group_replication_exit_state_action = READ_ONLY
# Disable flow control to reduce latency
loose-group_replication_flow_control_mode = "DISABLED"
# Enable single-primary mode
loose-group_replication_single_primary_mode = ON
# MGR majority-after mode
loose-group_replication_majority_after_mode = ON
# Maximum message size, suited to a small-memory server
loose-group_replication_communication_max_message_size = 6M
# Disable the arbitrator node
loose-group_replication_arbitrator = 0
# Fast single-primary switchover
loose-group_replication_single_primary_fast_mode = 1
# Request time threshold
loose-group_replication_request_time_threshold = 100
# Primary election mode
loose-group_replication_primary_election_mode = GTID_FIRST
# Timeout settings
loose-group_replication_unreachable_majority_timeout = 0
loose-group_replication_member_expel_timeout = 5
loose-group_replication_autorejoin_tries = 288
loose-group_replication_recovery_get_public_key = ON
# Reported host IP
report_host = "172.16.0.3"
#innodb settings
# InnoDB buffer pool size: recommended 50%-70% of available memory
innodb_buffer_pool_size = 512M
# Number of buffer pool instances
innodb_buffer_pool_instances = 3
# InnoDB data file path, auto-extending
innodb_data_file_path = ibdata1:12M:autoextend
# Flush the redo log at every transaction commit
innodb_flush_log_at_trx_commit = 1
# Redo log buffer size
innodb_log_buffer_size = 24M
# Redo log file size
innodb_log_file_size = 512M
# Number of redo log files in the group
innodb_log_files_in_group = 2
# Number of doublewrite files
innodb_doublewrite_files = 1
# Maximum undo log size
innodb_max_undo_log_size = 1G
# Tune according to the server's actual IOPS capability:
# an ordinary SSD can usually take 10000 - 20000,
# a high-end PCIe SSD can go higher, e.g. 50000 - 80000
innodb_io_capacity = 10000
innodb_io_capacity_max = 20000
# Number of files InnoDB may keep open
innodb_open_files = 10000
# Flush method; keep O_DIRECT
innodb_flush_method = O_DIRECT
# LRU scan depth
innodb_lru_scan_depth = 1024
# Row lock wait timeout (seconds); keep original value
innodb_lock_wait_timeout = 10
# Roll back the whole transaction on lock wait timeout
innodb_rollback_on_timeout = 1
# Print all deadlocks to the error log
innodb_print_all_deadlocks = 1
# Maximum online DDL log size
innodb_online_alter_log_max_size = 2G
# Print DDL logs
innodb_print_ddl_logs = ON
# Enable the InnoDB status file
innodb_status_file = ON
# Disable periodic InnoDB status output
innodb_status_output = OFF
# Include lock information in status output
innodb_status_output_locks = ON
# InnoDB sort buffer size
innodb_sort_buffer_size = 24M
# Adaptive hash index, disabled to reduce memory use
innodb_adaptive_hash_index = OFF
# NUMA interleave, keep off
innodb_numa_interleave = OFF
# Spin wait delay, keep original value
innodb_spin_wait_delay = 20
Set innodb_buffer_pool_size to about 50% of the available physical memory first; the upper bound can go to roughly 70%.
You said the import breaks when it reaches that 550k-row table; please provide the specific error message or logs, otherwise the root cause cannot be analyzed.
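For reference, in MySQL 8.0-based builds such as GreatSQL the buffer pool can be resized online, so this can be tested before editing my.cnf. A minimal sketch, assuming the socket path from the config above and a bare-metal 24G server (inside a container, size against the container's memory limit instead):

mysql -S /data/GreatSQL/mysql.sock -uroot -p -e "
  SET GLOBAL innodb_buffer_pool_size = 12 * 1024 * 1024 * 1024;  -- ~50% of 24G
  SELECT @@innodb_buffer_pool_size / 1024 / 1024 AS buffer_pool_mb;"
# the new size is rounded up to a multiple of
# innodb_buffer_pool_chunk_size * innodb_buffer_pool_instances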
yejr posted on 2024-12-10 10:45
Set innodb_buffer_pool_size to about 50% of the available physical memory first; the upper bound can go to roughly 70%.
You said the import breaks when ...
After I raised mem_limit and innodb_buffer_pool_size to 2G, the 550k-row table initialized normally. The initialization data covers about 350 tables, roughly 130MB in total. But, perhaps because the configuration is still too small, some tables still lose data during initialization. After enabling the general log I can see that every run is cut off abruptly (sometimes halfway through a SQL file, and the cut-off point differs from run to run). No error is reported and the server starts normally afterwards, but data is missing. Previously, MySQL ran fine with mem_limit = 512M.
The general.log is as follows:
2024-12-11T14:48:32.114472+08:00 18 Query CREATE TABLE IF NOT EXISTS `xxl_job_log` (
`id` bigint NOT NULL AUTO_INCREMENT,
`job_group` int NOT NULL COMMENT 'executor primary key ID',
`job_id` int NOT NULL COMMENT 'job primary key ID',
`executor_address` varchar(255) DEFAULT NULL COMMENT 'executor address used for this execution',
`handle_msg` text COMMENT 'execution log',
`alarm_status` tinyint NOT NULL DEFAULT '0' COMMENT 'alarm status: 0-default, 1-no alarm needed, 2-alarm succeeded, 3-alarm failed',
PRIMARY KEY (`id`),
KEY `I_trigger_time` (`trigger_time`),
KEY `I_handle_code` (`handle_code`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
/usr/sbin/mysqld, Version: 8.0.32-26 (GreatSQL (GPL), Release 26, Revision a68b3034c3d), Time: 2024-12-11T14:49:11.957434+08:00. started with:
Tcp port: 3306  Unix socket: /data/GreatSQL/mysql.sock
Time Id Command Argument
2024-12-11T14:49:12.476709+08:00 0 Execute CREATE TABLE performance_schema.innodb_redo_log_files(
`FILE_ID` BIGINT NOT NULL COMMENT 'Id of the file.',
`FILE_NAME` VARCHAR(2000) NOT NULL COMMENT 'Path to the file.',
`START_LSN` BIGINT NOT NULL COMMENT 'LSN of the first block in the file.',
`END_LSN` BIGINT NOT NULL COMMENT 'LSN after the last block in the file.',
`SIZE_IN_BYTES` BIGINT NOT NULL COMMENT 'Size of the file (in bytes).',
`IS_FULL` TINYINT NOT NULL COMMENT '1 iff file has no free space inside.',
`CONSUMER_LEVEL` INT NOT NULL COMMENT 'All redo log consumers registered on smaller levels than this value, have already consumed this file.'
)engine = 'performance_schema'
2024-12-11T14:49:13.530994+08:00 11 Connect
2024-12-11T14:49:13.531198+08:00 11 Query SET SESSION group_replication_consistency= EVENTUAL
2024-12-11T14:49:13.538100+08:00 11 Quit
2024-12-11T14:49:20.256244+08:00 14 Connect test123@127.0.0.1 on nacos_config using SSL/TLS
2024-12-11T14:49:20.260965+08:00 14 Query /* mysql-connector-java-8.0.28 (Revision: 7ff2161da3899f379fb3171b6538b191b1c5c7e2) */SELECT@@session.auto_increment_increment AS auto_increment_increment, @@character_set_client AS character_set_client, @@character_set_connection AS character_set_connection, @@character_set_results AS character_set_results, @@character_set_server AS character_set_server, @@collation_server AS collation_server, @@collation_connection AS collation_connection, @@init_connect AS init_connect, @@interactive_timeout AS interactive_timeout, @@license AS license, @@lower_case_table_names AS lower_case_table_names, @@max_allowed_packet AS max_allowed_packet, @@net_write_timeout AS net_write_timeout, @@performance_schema AS performance_schema, @@sql_mode AS sql_mode, @@system_time_zone AS system_time_zone, @@time_zone AS time_zone, @@transaction_isolation AS transaction_isolation, @@wait_timeout AS wait_timeout
2024-12-11T14:49:20.273733+08:00 14 Query SET character_set_results = NULL
2024-12-11T14:49:20.274633+08:00 14 Query SET autocommit=1
2024-12-11T14:49:20.280088+08:00 14 Query SELECT @@session.transaction_read_only
2024-12-11T14:49:20.288840+08:00 14 Query SELECT 1
2024-12-11T14:49:20.301047+08:00 14 Query SELECT @@session.transaction_isolation
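Note the server restart banner at 14:49:11 in the excerpt above: mysqld itself restarted in the middle of the import, which points to the process being killed rather than to a SQL-level error. One quick way to count such restarts (a sketch; the log paths are assumptions based on the config above, verify with @@general_log_file and @@log_error):

grep -c 'started with:' /data/GreatSQL/general.log    # one hit per mysqld start
grep -n 'Starting crash recovery' /data/GreatSQL/error.log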
Jnu2021 posted on 2024-12-11 17:11
After I raised mem_limit and innodb_buffer_pool_size to 2G, the 550k-row table initialized normally. The initialization data covers about 35 ...
What exactly do you mean by mem_limit? Please provide details, including how it is configured.
I suspect two possibilities:
1. The mem_limit you mention caps the memory GreatSQL can use, and the process gets killed once the cap is reached. Check the system logs (dmesg, /var/log/messages, etc.) to confirm.
2. The GreatSQL instance uses a lot of memory and triggers a system OOM kill. This also needs the system logs to confirm.
P.S. Without detailed logs, configuration, and error messages, only a description of the symptoms, the problem is almost impossible to troubleshoot.
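In practice, an OOM kill typically shows up along these lines (a sketch; the container name "greatsql" is hypothetical):

dmesg -T | grep -iE 'out of memory|killed process'   # kernel OOM killer events
grep -i 'oom' /var/log/messages                      # same events on syslog-based systems
docker inspect greatsql --format '{{.State.OOMKilled}}'   # true if the container itself was OOM-killed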
yejr posted on 2024-12-12 11:03
What exactly do you mean by mem_limit? Please provide details, including how it is configured.
I suspect two possibilities ...
mem_limit refers to deploy:resources:limits:memory in docker-compose.yml.
When it was 512M, OOM occurred and the system killed the process.
After I raised it to 2G that problem went away. Below is the error.log: while greatsql-init.sh was executing the initialization scripts, the log below suddenly appeared and the server restarted, and redo recovery could not bring back the data that had not yet been initialized.
2024-12-12T14:04:22.600673+08:00 1 Initializing buffer pool, total size = 2.250000G, instances = 3, chunk size =128.000000M
2024-12-12T14:04:23.083338+08:00 1 Completed initialization of buffer pool
2024-12-12T14:04:23.139222+08:00 1 Using './#ib_16384_0.dblwr' for doublewrite
2024-12-12T14:04:23.253250+08:00 1 Double write buffer files: 1
2024-12-12T14:04:23.253328+08:00 1 Double write buffer pages per instance: 4
2024-12-12T14:04:23.253371+08:00 1 Using './#ib_16384_0.dblwr' for doublewrite
2024-12-12T14:04:23.398186+08:00 1 The latest found checkpoint is at lsn = 77761702 in redo log file ./#innodb_redo/#ib_redo2.
2024-12-12T14:04:23.398339+08:00 1 The log sequence number 19798922 in the system tablespace does not match the log sequence number 77761702 in the redo log files!
2024-12-12T14:04:23.398370+08:00 1 Database was not shutdown normally!
2024-12-12T14:04:23.398388+08:00 1 Starting crash recovery.
2024-12-12T14:04:23.623597+08:00 1 Starting to parse redo log at lsn = 77761570, whereas checkpoint_lsn = 77761702 and start_lsn = 77761536
2024-12-12T14:04:23.905240+08:00 1 Doing recovery: scanned up to log sequence number 83004416

Jnu2021 posted on 2024-12-12 15:37
mem_limit refers to deploy:resources:limits:memory in docker-compose.yml.
When it was 512M, OOM occurred ...
With Docker capping the container at mem_limit=512M and GreatSQL inside it configured with innodb_buffer_pool_size=512M, that is almost certainly an OOM kill; checking the system logs will confirm it.

Why set a 512MB limit in the first place?

Oh, so you are running production data with GreatSQL inside Docker...
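In short, the container limit needs headroom above innodb_buffer_pool_size plus per-session buffers and binlog caches. A minimal sketch of the compose-side fix (the service name "greatsql" and the 4g figure are assumptions, size them to your workload):

cat > docker-compose.override.yml <<'EOF'
services:
  greatsql:
    deploy:
      resources:
        limits:
          memory: 4g   # keep well above innodb_buffer_pool_size + session buffers
EOF
docker compose up -d   # recreate the container with the new limit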