mydumper使用记录
参考:
http://t.zoukankan.com/liang545621-p-7497461.html
https://www.jianshu.com/p/27e6ac408969
https://www.cnblogs.com/xiaoyaojinzhazhadehangcheng/p/15098594.html
https://developer.51cto.com/article/685874.html
mydumper常用命令速览
#1、备份多个指定的表数据
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98,sbtest99,sbtest100 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
#2、备份全部数据库包含触发器、事件、存储过程及函数、包括系统库表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
#3、备份全部数据库包含触发器、事件、存储过程及函数、不包括系统库表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
#4、备份指定库
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
#5、只备份表结构
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-data --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
#6、只备份表数据
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-schemas --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
#7、压缩备份某个表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --compress --trx-consistency-only
#8、不备份某(几)个库
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql\.|sys\.|information_schema\.|performance_schema\.))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
实验环境说明
10.105.114.34 mydumper 64核256G 3.5T nvme0n1
10.105.114.35 mysql5.7.29 64核256G 3.5T nvme0n1
# 创建账号
create user 'bak'@'10.%' identified by 'cjLXUFE0uQja6DR';
grant all privileges on *.* to 'bak'@'10.%';
# 创建数据
create database sbtest;
# 备份实验表
sbtest1~sbtest100
# 每张表5千万(50M)记录,每张表大小12G
# 目标库 IP:10.105.114.35 端口:7306
# 备份目标目录:[root@spa-34-114-105 data]# mkdir -p /data/backup/{data,log}
测试结果概览
# 3张表,共计:28G,3线程, 耗费8分钟。
开始时间:2022-07-29 15:02:58
结束时间:2022-07-29 15:10:43
# 1.2T 业务库, 100张表,每张表5千万(50M)记录。100线程,耗费20分钟。
开始时间:2022-07-29 15:34:44
结束时间:2022-07-29 15:55:41
1、备份多个指定的表数据
#1、备份多个指定的表数据
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98,sbtest99,sbtest100 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 3 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
2、备份全部数据库 包含触发器、事件、存储过程及函数、包括系统库表
#2、备份全部数据库 包含触发器、事件、存储过程及函数、包括系统库表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
3、备份全部数据库包含触发器、事件、存储过程及函数、不包括系统库表
#3、备份全部数据库 包含触发器、事件、存储过程及函数、不包括系统库表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
4、备份指定库
#4、备份指定库
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
5、只备份表结构
#5、只备份表结构
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-data --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
6、只备份表数据
#6、只备份表数据
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-schemas --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
7、压缩备份某个表
#7、压缩备份某个表
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --compress --trx-consistency-only
8、不备份某(几)个库
#8、不备份某(几)个库
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql\.|sys\.|information_schema\.|performance_schema\.))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
mydumper主要流程概括
mydumper主要流程概括:
1、主线程 FLUSH TABLES WITH READ LOCK, 施加全局只读锁,以阻止DML语句写入,保证数据的一致性
2、读取当前时间点的二进制日志文件名和日志写入的位置并记录在metadata文件中,以供即时点(point-in-time)恢复使用
3、N个(线程数可以指定,默认是4)dump线程 START TRANSACTION WITH CONSISTENT SNAPSHOT; 开启读一致的事务
4、dump non-InnoDB tables, 首先导出非事务引擎的表
5、主线程 UNLOCK TABLES 非事务引擎备份完后,释放全局只读锁
6、dump InnoDB tables, 基于事务导出InnoDB表
7、事务结束
mydumper使用--less-locking可以减少锁等待时间,此时mydumper的执行机制大致为
1、主线程 FLUSH TABLES WITH READ LOCK (全局锁)
2、Dump线程 START TRANSACTION WITH CONSISTENT SNAPSHOT;
3、LL Dump线程 LOCK TABLES non-InnoDB (线程内部锁)
4、主线程UNLOCK TABLES
5、LL Dump线程 dump non-InnoDB tables
6、LL Dump线程 UNLOCK non-InnoDB
7、Dump线程 dump InnoDB tables
mydumper安装
官网地址:https://launchpad.net/mydumper
GitHub 地址:https://github.com/maxbube/mydumper
参考官方介绍,mydumper 主要有以下几点特性:
支持多线程导出数据,速度更快。
支持一致性备份。
支持将导出文件压缩,节约空间。
支持多线程恢复。
支持以守护进程模式工作,定时快照和连续二进制日志。
支持按照指定大小将备份文件切割。
数据与建表语句分离。
[root@spa-34-114-105 ~]# yum install -y glib2-devel zlib-devel pcre-devel openssl-devel
[root@spa-34-114-105 ~]# ll mydumper-0.12.5-2.el7.x86_64.rpm
-rw-r--r-- 1 root root 2736370 Jul 18 11:39 mydumper-0.12.5-2.el7.x86_64.rpm
[root@spa-34-114-105 ~]# rpm -ivh mydumper-0.12.5-2.el7.x86_64.rpm
Preparing... ################################# [100%]
Updating / installing...
1:mydumper-0.12.5-2 ################################# [100%]
[root@spa-34-114-105 ~]#
查看mydumper帮助
[root@spa-34-114-105 ~]# mydumper --help
Usage:
mydumper [OPTION…] multi-threaded MySQL dumping
Help Options:
-?, --help Show help options
Application Options:
-B, --database Database to dump
-o, --outputdir Directory to output files to
-d, --no-data Do not dump table data
-D, --daemon Enable daemon mode
-L, --logfile Log file name to use, by default stdout is used
--disk-limits Set the limit to pause and resume if determines there is no enough disk space.Accepts values like: '<resume>:<pause>' in MB.For instance: 100:500 will pause when there is only 100MB free and willresume if 500MB are available
-t, --threads Number of threads to use, default 4
-C, --compress-protocol Use compression on the MySQL connection
-V, --version Show the program version and exit
-v, --verbose Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2
--defaults-file Use a specific defaults file
--ssl Connect using SSL
--ssl-mode Desired security state of the connection to the server: DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY
--key The path name to the key file
--cert The path name to the certificate file
--ca The path name to the certificate authority file
--capath The path name to a directory that contains trusted SSL CA certificates in PEM format
--cipher A list of permissible ciphers to use for SSL encryption
--tls-version Which protocols the server permits for encrypted connections
--stream It will stream over STDOUT once the files has been written
--no-delete It will not delete the files after stream has been completed
-O, --omit-from-file File containing a list of database.table entries to skip, one per line (skips before applying regex option)
-T, --tables-list Comma delimited table list to dump (does not exclude regex option)
-h, --host The host to connect to
-u, --user Username with the necessary privileges
-p, --password User password
-a, --ask-password Prompt For User password
-P, --port TCP/IP port to connect to
-S, --socket UNIX domain socket file to use for connection
-x, --regex Regular expression for 'db.table' matching
-G, --triggers Dump triggers. By default, it do not dump triggers
--split-partitions Dump partitions into separate files. This options overrides the --rows option for partitioned tables.
--max-rows Limit the number of rows per block after the table is estimated, default 1000000
--no-check-generated-fields Queries related to generated fields are not going to be executed.It will lead to restoration issues if you have generated columns
--order-by-primary Sort the data by Primary Key or Unique key if no primary key exists
-E, --events Dump events. By default, it do not dump events
-R, --routines Dump stored procedures and functions. By default, it do not dump stored procedures nor functions
-W, --no-views Do not dump VIEWs
-M, --checksum-all Dump checksums for all elements
--data-checksums Dump table checksums with the data
--schema-checksums Dump schema table and view creation checksums
--routine-checksums Dump triggers, functions and routines checksums
--tz-utc SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data when a server has data in different time zones or data is being moved between servers with different time zones, defaults to on use --skip-tz-utc to disable.
--complete-insert Use complete INSERT statements that include column names
--skip-tz-utc
-z, --tidb-snapshot Snapshot to use for TiDB
-N, --insert-ignore Dump rows with INSERT IGNORE
--replace Dump rows with REPLACE
--exit-if-broken-table-found Exits if a broken table has been found
--success-on-1146 Not increment error count and Warning instead of Critical in case of table doesn't exist
--use-savepoints Use savepoints to reduce metadata locking issues, needs SUPER privilege
-s, --statement-size Attempted size of INSERT statement in bytes, default 1000000
-F, --chunk-filesize Split tables into chunks of this output file size. This value is in MB
-e, --build-empty-files Build dump files even if no data available from table
--where Dump only selected records.
-i, --ignore-engines Comma delimited list of storage engines to ignore
--load-data
--csv Automatically enables --load-data and set variables to export in CSV format.
--fields-terminated-by
--fields-enclosed-by
--fields-escaped-by Single character that is going to be used to escape characters in theLOAD DATA stament, default: '\'
--lines-starting-by Adds the string at the begining of each row. When --load-data is usedit is added to the LOAD DATA statement. Its affects INSERT INTO statementsalso when it is used.
--lines-terminated-by Adds the string at the end of each row. When --load-data is used it isadded to the LOAD DATA statement. Its affects INSERT INTO statementsalso when it is used.
--statement-terminated-by This might never be used, unless you know what are you doing
-r, --rows Try to split tables into chunks of this many rows. This option turns off --chunk-filesize
-c, --compress Compress output files
--exec Command to execute using the file as parameter
--long-query-retries Retry checking for long queries, default 0 (do not retry)
--long-query-retry-interval Time to wait before retrying the long query check in seconds, default 60
-l, --long-query-guard Set long query timer in seconds, default 60
--tidb-snapshot Snapshot to use for TiDB
-U, --updated-since Use Update_time to dump only tables updated in the last U days
-k, --no-locks Do not execute the temporary shared read lock. WARNING: This will cause inconsistent backups
-Y, --all-tablespaces Dump all the tablespaces.
--no-backup-locks Do not use Percona backup locks
--lock-all-tables Use LOCK TABLE for all, instead of FTWRL
--less-locking Minimize locking time on InnoDB tables.
--trx-consistency-only Transactional consistency only
-m, --no-schemas Do not dump table schemas with the data and triggers
-K, --kill-long-queries Kill long running queries (instead of aborting)
--set-names Sets the names, use it at your own risk, default binary
--pmm-path which default value will be /usr/local/percona/pmm2/collectors/textfile-collector/high-resolution
--pmm-resolution which default will be high
-I, --snapshot-interval Interval between each dump snapshot (in minutes), requires --daemon, default 60
-X, --snapshot-count number of snapshots, default 2
[root@spa-34-114-105 ~]#
myloader参数详解
-d, --directory Directory of the dump to import
-q, --queries-per-transaction Number of queries per transaction, default 1000
-o, --overwrite-tables Drop tables if they already exist
--append-if-not-exist Appends IF NOT EXISTS to the create table statements. This will be removed when https://bugs.mysql.com/bug.php?id=103791 has been implemented
-B, --database An alternative database to restore into
-s, --source-db Database to restore
-e, --enable-binlog Enable binary logging of the restore data
--innodb-optimize-keys Creates the table without the indexes and it adds them at the end
--set-names Sets the names, use it at your own risk, default binary
-L, --logfile Log file name to use, by default stdout is used
--purge-mode This specify the truncate mode which can be: NONE, DROP, TRUNCATE and DELETE
--disable-redo-log Disables the REDO_LOG and enables it after, doesn't check initial status
-r, --rows Split the INSERT statement into this many rows.
--max-threads-per-table Maximum number of threads per table to use, default 4
--skip-triggers Do not import triggers. By default, it imports triggers
--skip-post Do not import events, stored procedures and functions. By default, it imports events, stored procedures nor functions
--no-data Do not dump or import table data
--serialized-table-creation Table recreation will be executed in serie, one thread at a time
--resume Expect to find resume file in backup dir and will only process those files
--pmm-path which default value will be /usr/local/percona/pmm2/collectors/textfile-collector/high-resolution
--pmm-resolution which default will be high
-t, --threads Number of threads to use, default 4
-C, --compress-protocol Use compression on the MySQL connection
-V, --version Show the program version and exit
-v, --verbose Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2
--defaults-file Use a specific defaults file
--ssl Connect using SSL
--ssl-mode Desired security state of the connection to the server: DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY
--key The path name to the key file
--cert The path name to the certificate file
--ca The path name to the certificate authority file
--capath The path name to a directory that contains trusted SSL CA certificates in PEM format
--cipher A list of permissible ciphers to use for SSL encryption
--tls-version Which protocols the server permits for encrypted connections
--stream It will stream over STDOUT once the files has been written
--no-delete It will not delete the files after stream has been completed
-O, --omit-from-file File containing a list of database.table entries to skip, one per line (skips before applying regex option)
-T, --tables-list Comma delimited table list to dump (does not exclude regex option)
-h, --host The host to connect to
-u, --user Username with the necessary privileges
-p, --password User password
-a, --ask-password Prompt For User password
-P, --port TCP/IP port to connect to
-S, --socket UNIX domain socket file to use for connection
-x, --regex Regular expression for 'db.table' matching
--skip-definer Removes DEFINER from the CREATE statement. By default, statements are not modified
1、单表恢复
# 无法直接还原单表 只能找到单表的sql文件 进入命令行source 执行, 如果表结构不存在,需要先创建表结构
mysql --host 10.105.114.35 --user=bak --password=cjLXUFE0uQja6DR --port=7306 db2 < /data/backup/data/0/sbtest.sbtest98.00000.sql &
2、单库恢复
# 排除系统库,只备份业务
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test|sys|performance_schema|information_schema))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 从全备中恢复sbtest库到指定的db2
# 先删除目标: drop database db2;
myloader --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database db2 --overwrite-tables --source-db sbtest --directory /data/backup/data/0/ --queries-per-transaction 1000 --enable-binlog --threads 8 --compress-protocol --verbose 3 --logfile /data/backup/log/db_recover.log --purge-mode TRUNCATE --max-threads-per-table 4 --append-if-not-exist &
3、恢复全部的业务库
# 排除系统库,只备份业务
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test|sys|performance_schema|information_schema))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 恢复全部的业务库
myloader --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --overwrite-tables --directory /data/backup/data/0/ --queries-per-transaction 1000 --enable-binlog --threads 8 --compress-protocol --verbose 3 --logfile /data/backup/log/db_recover.log --purge-mode TRUNCATE --max-threads-per-table 4 --append-if-not-exist &
标签:mydumper,记录,--,data,long,使用,backup,log From: https://www.cnblogs.com/bjx2020/p/16662401.html