[mysql@node01 ~]$ uuidgen
8d1945a5-5c74-4ba0-8240-e9d731110753
[mysql@node01 ~]$ vim /etc/my.cnf
server_id=101
log_bin=mysql-bin
binlog_cache_size=16M
max_binlog_size=128M
binlog_expire_logs_seconds=2592000
binlog_rows_query_log_events=on
binlog_transaction_dependency_tracking = WRITESET
relay_log = relay-bin
relay_log_recovery=on
gtid_mode = on
enforce_gtid_consistency = on
report_host=node01
super_read_only=on
plugin_load_add='group_replication.so'
loose_group_replication_group_name = "8d1945a5-5c74-4ba0-8240-e9d731110753"
loose_group_replication_start_on_boot = off
loose_group_replication_bootstrap_group = off
loose_group_replication_local_address = "192.168.1.101:33061"
loose_group_replication_group_seeds="192.168.1.101:33061,192.168.1.102:33061,192.168.1.103:33061"
loose_group_replication_ip_allowlist="192.168.1.0/24"
loose_group_replication_recovery_get_public_key=on
[mysql@node01 ~]$ /etc/init.d/mysql.server start
[mysql@node01 ~]$ /etc/init.d/mysql.server status
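Before setting up the recovery channel, it is worth confirming that the group_replication plugin actually loaded; this check is not part of the original session, but the query is standard:
(root@node01) > select plugin_name, plugin_status from information_schema.plugins where plugin_name = 'group_replication';
The status should be ACTIVE; if not, recheck plugin_load_add in /etc/my.cnf and the error log.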
(root@node01) > create user repl@'%' identified by 'repl';
Query OK, 0 rows affected (0.02 sec)
(root@node01) > grant replication slave on *.* to repl@'%';
Query OK, 0 rows affected (0.03 sec)
(root@node01) > show grants for repl@'%';
+----------------------------------------------+
| Grants for repl@%                            |
+----------------------------------------------+
| GRANT REPLICATION SLAVE ON *.* TO `repl`@`%` |
+----------------------------------------------+
1 row in set (0.00 sec)
(root@node01) > change replication source to source_user='repl',source_password='repl' for channel 'group_replication_recovery';
Query OK, 0 rows affected, 1 warning (0.03 sec)
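The single warning is the usual notice that SOURCE_PASSWORD stores the credentials in the connection metadata repository in plain text; it can be inspected with:
(root@node01) > show warnings\G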
(root@node01) > set @@global.group_replication_bootstrap_group=on;
Query OK, 0 rows affected (0.00 sec)
(root@node01) > start group_replication;
Query OK, 0 rows affected (1.62 sec)
(root@node01) > set @@global.group_replication_bootstrap_group=off;
Query OK, 0 rows affected (0.00 sec)
(root@node01) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
1 row in set (0.00 sec)
(root@node01) > show global variables like 'super_read_only';
+-----------------+-------+
| Variable_name   | Value |
+-----------------+-------+
| super_read_only | OFF   |
+-----------------+-------+
1 row in set (0.06 sec)
[mysql@node01 ~]$ ssh-keygen
[mysql@node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub mysql@node02
[mysql@node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub mysql@node03
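The backup below assumes a dedicated xtrabackup account already exists on node01. A minimal sketch of creating it (the account name and password are assumptions matching the flags used below; the grants follow Percona XtraBackup 8.0's documented minimum):
(root@node01) > create user xtrabackup@'localhost' identified by 'xtrabackup';
(root@node01) > grant backup_admin, process, reload, lock tables, replication client on *.* to xtrabackup@'localhost';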
[mysql@node01 ~]$ xtrabackup --user=xtrabackup --password=xtrabackup --backup --target-dir=/home/mysql/backup/full
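With a wildcard source, scp fails if the target directory is missing on the receiver, so create it first (these commands are not in the original session):
[mysql@node01 ~]$ ssh node02 mkdir -p /home/mysql/backup/full
[mysql@node01 ~]$ ssh node03 mkdir -p /home/mysql/backup/full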
[mysql@node01 ~]$ scp -r backup/full/* node02:/home/mysql/backup/full
[mysql@node01 ~]$ scp -r backup/full/* node03:/home/mysql/backup/full
[mysql@node02 ~]$ vim /etc/my.cnf
server_id=102
log_bin=mysql-bin
binlog_cache_size=16M
max_binlog_size=128M
binlog_expire_logs_seconds=2592000
binlog_rows_query_log_events=on
binlog_transaction_dependency_tracking = WRITESET
relay_log = relay-bin
relay_log_recovery=on
gtid_mode = on
enforce_gtid_consistency = on
report_host=node02
super_read_only=on
plugin_load_add='group_replication.so'
loose_group_replication_group_name = "8d1945a5-5c74-4ba0-8240-e9d731110753"
loose_group_replication_start_on_boot = off
loose_group_replication_bootstrap_group = off
loose_group_replication_local_address = "192.168.1.102:33061"
loose_group_replication_group_seeds="192.168.1.101:33061,192.168.1.102:33061,192.168.1.103:33061"
loose_group_replication_ip_allowlist="192.168.1.0/24"
loose_group_replication_recovery_get_public_key=on
[mysql@node02 ~]$ xtrabackup --prepare --target-dir=/home/mysql/backup/full
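xtrabackup --copy-back refuses to restore into a non-empty data directory, so if node02 carries remnants of a previous instance, clear the datadir configured in /etc/my.cnf first (the path below is a hypothetical example):
[mysql@node02 ~]$ rm -rf /data/mysql/*   # hypothetical datadir path; must be empty before --copy-back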
[mysql@node02 ~]$ xtrabackup --copy-back --target-dir=/home/mysql/backup/full
[mysql@node02 ~]$ /etc/init.d/mysql.server start
[mysql@node02 ~]$ /etc/init.d/mysql.server status
(root@node02) > change replication source to source_user='repl',source_password='repl' for channel 'group_replication_recovery';
Query OK, 0 rows affected, 1 warning (0.03 sec)
(root@node02) > start group_replication;
Query OK, 0 rows affected (2.04 sec)
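While a joiner is still in MEMBER_STATE RECOVERING, the distributed-recovery channel that uses the repl credentials can be watched directly; an illustrative query, not part of the original session:
(root@node02) > select channel_name, service_state from performance_schema.replication_connection_status where channel_name = 'group_replication_recovery';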
(root@node02) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
| group_replication_applier | cbc01ee0-313d-11ee-b892-08002706f104 | node02      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
2 rows in set (0.04 sec)
(root@node02) > show global variables like 'super_read_only';
+-----------------+-------+
| Variable_name   | Value |
+-----------------+-------+
| super_read_only | ON    |
+-----------------+-------+
1 row in set (0.03 sec)
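super_read_only stays ON on every secondary, so an attempted write there is rejected; for illustration:
(root@node02) > create database test_write;
ERROR 1290 (HY000): The MySQL server is running with the --super-read-only option so it cannot execute this statement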
[mysql@node03 ~]$ vim /etc/my.cnf
server_id=103
log_bin=mysql-bin
binlog_cache_size=16M
max_binlog_size=128M
binlog_expire_logs_seconds=2592000
binlog_rows_query_log_events=on
binlog_transaction_dependency_tracking = WRITESET
relay_log = relay-bin
relay_log_recovery=on
gtid_mode = on
enforce_gtid_consistency = on
report_host=node03
super_read_only=on
plugin_load_add='group_replication.so'
loose_group_replication_group_name = "8d1945a5-5c74-4ba0-8240-e9d731110753"
loose_group_replication_start_on_boot = off
loose_group_replication_bootstrap_group = off
loose_group_replication_local_address = "192.168.1.103:33061"
loose_group_replication_group_seeds="192.168.1.101:33061,192.168.1.102:33061,192.168.1.103:33061"
loose_group_replication_ip_allowlist="192.168.1.0/24"
loose_group_replication_recovery_get_public_key=on
[mysql@node03 ~]$ xtrabackup --prepare --target-dir=/home/mysql/backup/full
[mysql@node03 ~]$ xtrabackup --copy-back --target-dir=/home/mysql/backup/full
[mysql@node03 ~]$ /etc/init.d/mysql.server start
[mysql@node03 ~]$ /etc/init.d/mysql.server status
(root@node03) > change replication source to source_user='repl',source_password='repl' for channel 'group_replication_recovery';
Query OK, 0 rows affected, 1 warning (0.03 sec)
(root@node03) > start group_replication;
Query OK, 0 rows affected (2.04 sec)
(root@node03) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
| group_replication_applier | cbc01ee0-313d-11ee-b892-08002706f104 | node02      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
| group_replication_applier | e2e78b1e-3142-11ee-b142-080027f8683b | node03      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
3 rows in set (0.06 sec)
(root@node03) > show global variables like 'super_read_only';
+-----------------+-------+
| Variable_name   | Value |
+-----------------+-------+
| super_read_only | ON    |
+-----------------+-------+
1 row in set (0.01 sec)
(root@node01) > select * from performance_schema.replication_group_member_stats\G
*************************** 1. row ***************************
CHANNEL_NAME: group_replication_applier
VIEW_ID: 16910291952308333:5
MEMBER_ID: 0701007b-3000-11ee-be12-0800272a1c19
COUNT_TRANSACTIONS_IN_QUEUE: 0
COUNT_TRANSACTIONS_CHECKED: 261
COUNT_CONFLICTS_DETECTED: 0
COUNT_TRANSACTIONS_ROWS_VALIDATING: 804
TRANSACTIONS_COMMITTED_ALL_MEMBERS: 8d1945a5-5c74-4ba0-8240-e9d731110753:1-446
LAST_CONFLICT_FREE_TRANSACTION: 8d1945a5-5c74-4ba0-8240-e9d731110753:503
COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE: 0
COUNT_TRANSACTIONS_REMOTE_APPLIED: 4
COUNT_TRANSACTIONS_LOCAL_PROPOSED: 261
COUNT_TRANSACTIONS_LOCAL_ROLLBACK: 0
*************************** 2. row ***************************
CHANNEL_NAME: group_replication_applier
VIEW_ID: 16910291952308333:5
MEMBER_ID: cbc01ee0-313d-11ee-b892-08002706f104
COUNT_TRANSACTIONS_IN_QUEUE: 0
COUNT_TRANSACTIONS_CHECKED: 261
COUNT_CONFLICTS_DETECTED: 0
COUNT_TRANSACTIONS_ROWS_VALIDATING: 804
TRANSACTIONS_COMMITTED_ALL_MEMBERS: 8d1945a5-5c74-4ba0-8240-e9d731110753:1-446
LAST_CONFLICT_FREE_TRANSACTION: 8d1945a5-5c74-4ba0-8240-e9d731110753:503
COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE: 0
COUNT_TRANSACTIONS_REMOTE_APPLIED: 261
COUNT_TRANSACTIONS_LOCAL_PROPOSED: 0
COUNT_TRANSACTIONS_LOCAL_ROLLBACK: 0
*************************** 3. row ***************************
CHANNEL_NAME: group_replication_applier
VIEW_ID: 16910291952308333:5
MEMBER_ID: e2e78b1e-3142-11ee-b142-080027f8683b
COUNT_TRANSACTIONS_IN_QUEUE: 0
COUNT_TRANSACTIONS_CHECKED: 261
COUNT_CONFLICTS_DETECTED: 0
COUNT_TRANSACTIONS_ROWS_VALIDATING: 804
TRANSACTIONS_COMMITTED_ALL_MEMBERS: 8d1945a5-5c74-4ba0-8240-e9d731110753:1-446
LAST_CONFLICT_FREE_TRANSACTION: 8d1945a5-5c74-4ba0-8240-e9d731110753:503
COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE: 0
COUNT_TRANSACTIONS_REMOTE_APPLIED: 262
COUNT_TRANSACTIONS_LOCAL_PROPOSED: 0
COUNT_TRANSACTIONS_LOCAL_ROLLBACK: 0
3 rows in set (0.00 sec)
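Queues at zero (COUNT_TRANSACTIONS_IN_QUEUE and COUNT_TRANSACTIONS_REMOTE_IN_APPLIER_QUEUE) mean certification and the applier are keeping up on every member. A narrower query for routine monitoring might look like this (not in the original session):
(root@node01) > select member_id, count_transactions_in_queue, count_transactions_remote_in_applier_queue from performance_schema.replication_group_member_stats;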
Switchover of the primary role in single-primary Group Replication
(root@node01) > select group_replication_set_as_primary('cbc01ee0-313d-11ee-b892-08002706f104');
+--------------------------------------------------------------------------+
| group_replication_set_as_primary('cbc01ee0-313d-11ee-b892-08002706f104') |
+--------------------------------------------------------------------------+
| Primary server switched to: cbc01ee0-313d-11ee-b892-08002706f104          |
+--------------------------------------------------------------------------+
1 row in set (0.05 sec)
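group_replication_set_as_primary() can be issued from any member. From MySQL 8.0.29 (so it applies to the 8.0.32 servers here) it also accepts an optional second argument, a timeout in seconds for transactions still running when the switchover begins; for example:
(root@node01) > select group_replication_set_as_primary('cbc01ee0-313d-11ee-b892-08002706f104', 30);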
(root@node01) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
| group_replication_applier | cbc01ee0-313d-11ee-b892-08002706f104 | node02      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
| group_replication_applier | e2e78b1e-3142-11ee-b142-080027f8683b | node03      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
3 rows in set (0.00 sec)
Failover of the primary role in single-primary Group Replication
[mysql@node02 ~]$ /etc/init.d/mysql.server stop
(root@node01) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
| group_replication_applier | e2e78b1e-3142-11ee-b142-080027f8683b | node03      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
2 rows in set (0.00 sec)
[mysql@node02 ~]$ /etc/init.d/mysql.server start
(root@node02) > start group_replication;
Query OK, 0 rows affected (4.31 sec)
(root@node01) > select * from performance_schema.replication_group_members;
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| CHANNEL_NAME              | MEMBER_ID                            | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | MEMBER_VERSION | MEMBER_COMMUNICATION_STACK |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
| group_replication_applier | 0701007b-3000-11ee-be12-0800272a1c19 | node01      | 3306        | ONLINE       | PRIMARY     | 8.0.32         | XCom                       |
| group_replication_applier | cbc01ee0-313d-11ee-b892-08002706f104 | node02      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
| group_replication_applier | e2e78b1e-3142-11ee-b142-080027f8683b | node03      | 3306        | ONLINE       | SECONDARY   | 8.0.32         | XCom                       |
+---------------------------+--------------------------------------+-------------+-------------+--------------+-------------+----------------+----------------------------+
3 rows in set (0.00 sec)
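Because the configurations above set loose_group_replication_start_on_boot = off, a restarted member rejoins only after a manual START GROUP_REPLICATION, as done here. Once the group is stable, you may prefer automatic rejoin by changing that line in each /etc/my.cnf:
loose_group_replication_start_on_boot = on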
Parameters relevant to single-primary mode and failover, with their default values:
group_replication_single_primary_mode = ON #default
group_replication_enforce_update_everywhere_checks = OFF #default
group_replication_member_weight = 50 #default; member weight used in primary election
group_replication_consistency = EVENTUAL #default; transaction consistency level, other values: BEFORE_ON_PRIMARY_FAILOVER, BEFORE, AFTER, BEFORE_AND_AFTER
group_replication_compression_threshold = 1000000 #default, about 1MB
group_replication_communication_max_message_size = 10485760 #default, 10MB
group_replication_transaction_size_limit = 150000000 #default, about 143MB
group_replication_message_cache_size = 1073741824 #default, 1GB
group_replication_member_expel_timeout = 5 #default; seconds a suspected member is given before being expelled
group_replication_unreachable_majority_timeout = 0 #default
group_replication_autorejoin_tries = 3 #default
group_replication_exit_state_action = READ_ONLY #default; other values: OFFLINE_MODE, ABORT_SERVER
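group_replication_member_weight and group_replication_consistency are both dynamic, so they can be adjusted at runtime. For example, to make node01 the preferred candidate in a primary election and to make a newly promoted primary apply its backlog before accepting reads (values chosen for illustration):
(root@node01) > set global group_replication_member_weight = 80;
(root@node01) > set global group_replication_consistency = 'BEFORE_ON_PRIMARY_FAILOVER';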