案例说明:
通过NAS共享存储作为数据库存储文件系统,构建RAC架构。
适用版本:
KingbaseES V008R006C008M030B0010
操作系统版本:
[root@node201 KingbaseHA]# cat /etc/centos-release
CentOS Linux release 7.9.2009 (Core)
集群架构:
如下所示,node1和node2为集群节点:
节点信息:
[root@node201 KingbaseHA]# vi /etc/hosts
192.168.1.201 node201
192.168.1.202 node202
192.168.1.203 node203 NAS_Srv
集群软件:
[root@node201 data]# rpm -qa |egrep 'corosync|pacemaker'
corosynclib-2.4.5-7.el7_9.2.x86_64
pacemaker-1.1.23-1.el7_9.1.x86_64
pacemaker-libs-1.1.23-1.el7_9.1.x86_64
pacemaker-doc-1.1.23-1.el7_9.1.x86_64
corosync-qdevice-2.4.5-7.el7_9.2.x86_64
pacemaker-cluster-libs-1.1.23-1.el7_9.1.x86_64
pacemaker-cli-1.1.23-1.el7_9.1.x86_64
corosync-2.4.5-7.el7_9.2.x86_64
一、NAS 配置
如下所示,配置nfs共享文件系统,nfs server配置如下:
# nas server共享文件系统
[root@node203 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
.......
/dev/sdb1 11G 41M 9.9G 1% /shdsk
[root@node203 ~]# mount -v |grep shdsk
/dev/sdb1 on /shdsk type ext4 (rw,relatime,data=ordered)
# 建立nfs共享
[root@node203 ~]# cat /etc/exports
/shdsk *(rw,sync,no_root_squash)
[root@node203 ~]# exportfs -v
/shdsk <world>(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
如下所示,nfs client配置(rac节点作为nfs client):
# 客户端查看server共享
[root@node201 ~]# showmount -e 192.168.1.203
Export list for 192.168.1.203:
/shdsk *
# 客户端挂载nfs共享
[root@node201 ~]# mount -t nfs -o sync,noac,lookupcache=none 192.168.1.203:/shdsk /sharedata/data_nfs/
[root@node201 ~]# mount -v|grep shdsk
192.168.1.203:/shdsk on /sharedata/data_nfs type nfs4 (rw,relatime,sync,vers=4.1,rsize=262144,wsize=262144,namlen=255,acregmin=0,acregmax=0,acdirmin=0,acdirmax=0,hard,noac,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.201,lookupcache=none,local_lock=none,addr=192.168.1.203)
# 客户端配置开机挂载:
[root@node201 ~]# cat /etc/rc.local |grep mount
/usr/bin/mount -t nfs -o sync,noac,lookupcache=none 192.168.1.203:/shdsk /sharedata/data_nfs/
[root@node201 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
......
192.168.1.203:/shdsk 11G 40M 9.9G 1% /sharedata/data_nfs
Tips: 在CentOS 7下,/etc/rc.local默认无可执行权限,需执行chmod +x /etc/rc.d/rc.local,否则开机自动挂载不会生效;也可改用/etc/fstab配置nfs开机挂载。
二、环境准备
1、关闭系统防火墙(all nodes)
[root@node201 KingbaseHA]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
2、配置selinux
[root@node201 KingbaseHA]# cat /etc/sysconfig/selinux |grep -v ^$|grep -v ^#
SELINUXTYPE=targeted
SELINUX=disabled
# 临时关闭selinux并查看当前状态
[root@node201 KingbaseHA]# setenforce 0 ; getenforce
setenforce: SELinux is disabled
Disabled
3、配置ntp时钟同步(建议集群节点时钟同步)
# ntp server配置
[root@node201 KingbaseHA]# cat /etc/ntp.conf
server 127.127.1.0 prefer
fudge 127.127.1.0 stratum 10
restrict 192.168.1.0 255.255.255.0
[root@node202 KingbaseHA]# systemctl start ntpd
# ntp client配置
[root@node202 KingbaseHA]# cat /etc/ntp.conf
server 192.168.1.201
fudge 127.127.1.0 stratum 10
[root@node202 KingbaseHA]# systemctl start ntpd
# 查看时钟同步状态
[root@node202 KingbaseHA]# ntpq -p
remote refid st t when poll reach delay offset jitter
==============================================================================
ntp5.flashdance 194.58.202.20 2 u 30 1024 17 151.904 32.166 8.276
*time.neu.edu.cn .PTP. 1 u 1025 1024 7 56.031 25.537 3.294
+117.80.112.205 133.243.238.163 2 u 1029 1024 7 30.364 42.776 2.418
+111.230.189.174 100.122.36.196 2 u 883 1024 7 40.486 40.348 0.661
-node201 LOCAL(0) 11 u 507 1024 377 0.231 -7.570 7.571
4、创建数据库用户
[kingbase@node201 bin]$ id
uid=200(kingbase) gid=1001(kingbase) groups=1001(kingbase)
[kingbase@node202 bin]$ id
uid=200(kingbase) gid=1001(kingbase) groups=1001(kingbase)
三、部署RAC集群
1、安装数据库软件(所有节点)
[root@node201 soft]# mount -o loop KingbaseES_V008R006C008M030B0010_Lin64_install.iso /mnt
mount: /dev/loop0 is write-protected, mounting read-only
[kingbase@node201 mnt]$ sh setup.sh
Now launch installer...
Choose the server type
----------------------
Please choose the server type :
->1- default
2- rac
Default Install Folder: /opt/Kingbase/ES/V8
2、创建集群部署目录 (all nodes)
如下所示,进入数据库软件部署目录,执行集群脚本,默认创建"/opt/KingbaseHA"目录:
[root@node201 script]# pwd
/opt/Kingbase/ES/V8/install/script
[root@node201 script]# ls -lh
total 32K
-rwxr-xr-x 1 kingbase kingbase 321 Jul 18 14:17 consoleCloud-uninstall.sh
-rwxr-x--- 1 kingbase kingbase 3.6K Jul 18 14:17 initcluster.sh
-rwxr-x--- 1 kingbase kingbase 289 Jul 18 14:17 javatools.sh
-rwxr-xr-x 1 kingbase kingbase 553 Jul 18 14:17 rootDeployClusterware.sh
-rwxr-x--- 1 kingbase kingbase 767 Jul 18 14:17 root.sh
-rwxr-x--- 1 kingbase kingbase 627 Jul 18 14:17 rootuninstall.sh
-rwxr-x--- 1 kingbase kingbase 3.7K Jul 18 14:17 startupcfg.sh
-rwxr-x--- 1 kingbase kingbase 252 Jul 18 14:17 stopserver.sh
# 执行脚本
[root@node201 script]# sh rootDeployClusterware.sh
cp: cannot stat ‘@@INSTALL_DIR@@/KingbaseHA/*’: No such file or directory
# 修改脚本变量
[root@node201 V8]# head install/script/rootDeployClusterware.sh
#!/bin/sh
# copy KingbaseHA to /opt/KingbaseHA
ROOT_UID=0
#INSTALLDIR=@@INSTALL_DIR@@
INSTALLDIR=/opt/Kingbase/ES/V8/KESRealPro/V008R006C008M030B0010
# 执行脚本(创建/opt/KingbaseHA)
[root@node201 V8]# sh install/script/rootDeployClusterware.sh
/opt/KingbaseHA has existed. Do you want to override it?(y/n)y
y
[root@node201 V8]# ls -lh /opt/KingbaseHA/
total 64K
-rw-r--r-- 1 root root 3.8K Jul 30 17:38 cluster_manager.conf
-rwxr-xr-x 1 root root 54K Jul 30 17:38 cluster_manager.sh
drwxr-xr-x 9 root root 121 Jul 30 17:38 corosync
drwxr-xr-x 7 root root 122 Jul 30 17:38 corosync-qdevice
drwxr-xr-x 8 root root 68 Jul 30 17:38 crmsh
drwxr-xr-x 7 root root 65 Jul 30 17:38 dlm-dlm
drwxr-xr-x 5 root root 39 Jul 30 17:38 fence_agents
drwxr-xr-x 5 root root 60 Jul 30 17:38 gfs2
drwxr-xr-x 6 root root 53 Jul 30 17:38 gfs2-utils
drwxr-xr-x 5 root root 39 Jul 30 17:38 ipmi_tool
drwxr-xr-x 7 root root 84 Jul 30 17:38 kingbasefs
drwxr-xr-x 5 root root 42 Jul 30 17:38 kronosnet
drwxr-xr-x 2 root root 4.0K Jul 30 17:38 lib
drwxr-xr-x 2 root root 28 Jul 30 17:38 lib64
drwxr-xr-x 7 root root 63 Jul 30 17:38 libqb
drwxr-xr-x 10 root root 136 Jul 30 17:38 pacemaker
drwxr-xr-x 6 root root 52 Jul 30 17:38 python2.7
3、配置cluster_manager.conf (all nodes)
如下所示,基于NAS的部署,将不再初始化仲裁和votedisk磁盘:
[root@node202 KingbaseHA]# cat cluster_manager.conf |grep -v ^$|grep -v ^#
cluster_name=kcluster
node_name=(node201 node202) # 和/etc/hosts配置一致
node_ip=(192.168.1.201 192.168.1.202)
enable_qdisk=1 # 启用qdisk,需与enable_qdisk_fence=1保持一致(NAS部署不会实际初始化仲裁盘和votedisk)
votingdisk=/dev/sde
sharedata_dir=/sharedata/data_nfs # 指定data存储位置(共享存储)
install_dir=/opt/KingbaseHA
env_bash_file=/root/.bashrc
pacemaker_daemon_group=haclient
pacemaker_daemon_user=hacluster
kingbaseowner=kingbase
kingbasegroup=kingbase
kingbase_install_dir=/opt/Kingbase/ES/V8/Server
database="test"
username="system"
password="123456"
initdb_options="-A trust -U $username"
enable_fence=0 # 不使用fence
enable_qdisk_fence=1
install_rac=1
rac_port=54321
rac_lms_port=53444
rac_lms_count=7
.......
4、基础组件初始化 (all nodes)
在所有节点执行如下命令,初始化所有基础组件,如corosync,pacemaker,corosync-qdevice。
[root@node201 KingbaseHA]# ./cluster_manager.sh --base_configure_init
config host start
Host entry 192.168.1.201 node201 found, skiping...
Host entry 192.168.1.202 node202 found, skiping...
config host success
add env varaible in /root/.bashrc
add env variable success
config corosync.conf start
config corosync.conf success
Starting Corosync Cluster Engine (corosync): [ OK ]
add pacemaker daemon user start
groupadd: group 'haclient' already exists
useradd: user 'hacluster' already exists
add pacemaker daemon user success
config pacemaker success
Starting Pacemaker Cluster Manager[ OK ]
config qdevice start
config qdevice success
Starting Corosync Qdevice daemon (corosync-qdevice): [ OK ]
Please note the configuration: superuser(system) and port(36321) for database(test) of resource(DB0)
Please note the configuration: superuser(system) and port(36321) for database(test) of resource(DB1)
config kingbase rac start
config kingbase rac success
add_udev_rule start
add_udev_rule success
insmod dlm.ko success
check and mknod for dlm start
check and mknod for dlm success
# 查看/root/.bashrc
[root@node201 KingbaseHA]# cat /root/.bashrc
export PATH=/opt/KingbaseHA/python2.7/bin:/opt/KingbaseHA/pacemaker/sbin/:$PATH
export PATH=/opt/KingbaseHA/crmsh/bin:/opt/KingbaseHA/pacemaker/libexec/pacemaker/:$PATH
export PATH=/opt/KingbaseHA/corosync/sbin:/opt/KingbaseHA/corosync-qdevice/sbin:$PATH
export PYTHONPATH=/opt/KingbaseHA/python2.7/lib/python2.7/site-packages/:/opt/KingbaseHA/crmsh/lib/python2.7/site-packages:$PYTHONPATH
export COROSYNC_MAIN_CONFIG_FILE=/opt/KingbaseHA/corosync/etc/corosync/corosync.conf
export CRM_CONFIG_FILE=/opt/KingbaseHA/crmsh/etc/crm/crm.conf
export OCF_ROOT=/opt/KingbaseHA/pacemaker/ocf/
export HA_SBIN_DIR=/opt/KingbaseHA/pacemaker/sbin/
export PATH=/opt/KingbaseHA/dlm-dlm/sbin:/opt/KingbaseHA/gfs2-utils/sbin:$PATH
export LD_LIBRARY_PATH=/opt/KingbaseHA/corosync/lib/:$LD_LIBRARY_PATH
export install_dir=/opt/KingbaseHA
export OCF_ROOT=/opt/KingbaseHA/pacemaker/ocf
export QDEVICE_SBIN_DIR=/opt/KingbaseHA/corosync-qdevice/sbin/
export LD_LIBRARY_PATH=/opt/KingbaseHA/lib64/:$LD_LIBRARY_PATH
export HA_INSTALL_PATH=/opt/KingbaseHA
# 应用变量配置
[root@node201 KingbaseHA]# source /root/.bashrc
# 查看高可用服务
[root@node202 KingbaseHA]# ps -ef |grep corosync
root 25876 1 0 Jul30 ? 00:00:13 corosync -c /opt/KingbaseHA/corosync/etc/corosync/corosync.conf -p /opt/KingbaseHA/corosync/var/
[root@node202 KingbaseHA]# ps -ef |grep pacemaker
root 25909 1 0 Jul30 pts/0 00:00:00 pacemakerd -d /opt/KingbaseHA/pacemaker
haclust+ 25911 25909 5 Jul30 ? 00:58:55 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-based -d /opt/KingbaseHA/pacemaker
root 25912 25909 0 Jul30 ? 00:00:00 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-fenced -d /opt/KingbaseHA/pacemaker
root 25913 25909 0 Jul30 ? 00:00:00 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-execd -d /opt/KingbaseHA/pacemaker
haclust+ 25914 25909 0 Jul30 ? 00:00:00 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-attrd
haclust+ 25915 25909 0 Jul30 ? 00:00:00 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-schedulerd -d /opt/KingbaseHA/pacemaker
haclust+ 25916 25909 0 Jul30 ? 00:00:01 /opt/KingbaseHA/pacemaker/libexec/pacemaker/pacemaker-controld -d /opt/KingbaseHA/pacemaker
查看corosync.conf配置:
[root@node201 KingbaseHA]# cat /etc/corosync/corosync.conf|grep -v '#'
totem {
version: 2
crypto_cipher: none
crypto_hash: none
interface {
ringnumber: 0
bindnetaddr: 192.168.1.0
mcastaddr: 239.255.1.1
mcastport: 5405
ttl: 1
}
}
logging {
fileline: off
to_stderr: no
to_logfile: yes
logfile: /var/log/cluster/corosync.log
to_syslog: yes
debug: off
timestamp: on
logger_subsys {
subsys: QUORUM
debug: off
}
}
quorum {
}
5、数据库集簇初始化
如下所示,将在共享存储的文件系统下创建数据库:
[root@node201 KingbaseHA]# ./cluster_manager.sh --init_rac
init KingbaseES RAC start
clusterfs is NFS [ ok ]
create_rac_share_dir start
create_rac_share_dir success
数据库簇将使用本地化语言 "zh_CN.utf8"进行初始化.
.....
成功。您现在可以用下面的命令开启数据库服务器:
./sys_ctl -D /sharedata/data_nfs/kingbase/data -l 日志文件 start
init KingbaseES RAC success
6、配置数据库相关资源
如下所示,初始化fence、pingd、fip、DB等资源:
# 执行脚本
[root@node201 KingbaseHA]# ./cluster_manager.sh --config_rac_resource
crm configure DB resource start
crm configure DB resource end
# 查看资源配置
[root@node201 KingbaseHA]# crm configure show
node 1: node201
node 2: node202
primitive DB ocf:kingbase:kingbase \
params sys_ctl="/opt/Kingbase/ES/V8/Server/bin/sys_ctl" ksql="/opt/Kingbase/ES/V8/Server/bin/ksql" sys_isready="/opt/Kingbase/ES/V8/Server/bin/sys_isready" kb_data="/sharedata/data_nfs/kingbase/data" kb_dba=kingbase kb_host=0.0.0.0 kb_user=system kb_port=54321 kb_db=template1 logfile="/opt/Kingbase/ES/V8/Server/log/kingbase1.log" \
op start interval=0 timeout=120 \
op stop interval=0 timeout=120 \
op monitor interval=9s timeout=30 on-fail=stop \
meta failure-timeout=5min
clone clone-DB DB \
meta interleave=true target-role=Started
property cib-bootstrap-options: \
no-quorum-policy=freeze \
stonith-enabled=false \
load-threshold="0%"
# 查看集群服务状态
[root@node201 KingbaseHA]# crm status
Cluster Summary:
* Stack: unknown
* Current DC: node202 (version unknown) - partition with quorum
* Last updated: Thu Aug 1 16:49:02 2024
* Last change: Wed Jul 31 15:10:39 2024 by root via cibadmin on node201
* 2 nodes configured
* 2 resource instances configured
Node List:
* Online: [ node201 node202 ]
Full List of Resources:
* Clone Set: clone-DB [DB]:
* Started: [ node201 node202 ]
7、 集群管理数据库服务
[root@node201 KingbaseHA]# crm resource start clone-DB
[root@node201 KingbaseHA]# crm resource status clone-DB
resource clone-DB is running on: node202
resource clone-DB is running on: node201
[root@node201 KingbaseHA]# crm resource stop clone-DB
[root@node201 KingbaseHA]# crm resource status clone-DB
resource clone-DB is running on: node201
resource clone-DB is NOT running
[root@node201 KingbaseHA]# crm resource status clone-DB
resource clone-DB is NOT running
resource clone-DB is NOT running
[root@node201 KingbaseHA]# crm resource status clone-DB
resource clone-DB is NOT running
resource clone-DB is NOT running
# 集群启动数据库服务
[root@node201 KingbaseHA]# netstat -antlp |grep 54321
[root@node201 KingbaseHA]# crm resource start clone-DB
[root@node201 KingbaseHA]# crm resource status clone-DB
resource clone-DB is running on: node202
resource clone-DB is running on: node201
[root@node201 KingbaseHA]# netstat -antlp |grep 54321
tcp 0 0 0.0.0.0:54321 0.0.0.0:* LISTEN 10915/kingbase
[root@node201 KingbaseHA]# ps -ef |grep kingbase
kingbase 10915 1 0 17:27 ? 00:00:00 /opt/Kingbase/ES/V8/KESRealPro/V008R006C008M030B0010/Server/bin/kingbase -D /sharedata/data_nfs/kingbase/data -c config_file=/sharedata/data_nfs/kingbase/data/kingbase.conf -c log_directory=sys_log -h 0.0.0.0
kingbase 10939 10915 0 17:27 ? 00:00:00 kingbase: logger
kingbase 10940 10915 0 17:27 ? 00:00:00 kingbase: lmon
kingbase 10941 10915 0 17:27 ? 00:00:00 kingbase: lms 1
kingbase 10942 10915 0 17:27 ? 00:00:00 kingbase: lms 2
kingbase 10943 10915 0 17:27 ? 00:00:00 kingbase: lms 3
kingbase 10944 10915 0 17:27 ? 00:00:00 kingbase: lms 4
kingbase 10945 10915 0 17:27 ? 00:00:00 kingbase: lms 5
kingbase 10946 10915 0 17:27 ? 00:00:00 kingbase: lms 6
kingbase 10947 10915 0 17:27 ? 00:00:00 kingbase: lms 7
kingbase 11062 10915 0 17:27 ? 00:00:00 kingbase: checkpointer
kingbase 11063 10915 0 17:27 ? 00:00:00 kingbase: background writer
kingbase 11064 10915 0 17:27 ? 00:00:00 kingbase: global deadlock checker
kingbase 11065 10915 0 17:27 ? 00:00:00 kingbase: transaction syncer
kingbase 11066 10915 0 17:27 ? 00:00:00 kingbase: walwriter
kingbase 11067 10915 0 17:27 ? 00:00:00 kingbase: autovacuum launcher
kingbase 11068 10915 0 17:27 ? 00:00:00 kingbase: stats collector
kingbase 11069 10915 0 17:27 ? 00:00:00 kingbase: kwr collector
kingbase 11070 10915 0 17:27 ? 00:00:00 kingbase: ksh writer
kingbase 11071 10915 0 17:27 ? 00:00:00 kingbase: ksh collector
kingbase 11072 10915 0 17:27 ? 00:00:00 kingbase: logical replication launcher
四、访问数据库服务
1、启动数据库实例
如下所示,启动数据库实例默认等待60s,可以指定-t,-w延长数据库启动时间:
[kingbase@node201 bin]$ ./sys_ctl -D /sharedata/data_nfs/kingbase/data start -t 180 -w
waiting for server to start....2024-07-31 14:54:53.908 CST [18153] LOG: sepapower extension initialized
2024-07-31 14:54:53.912 CST [18153] LOG: starting KingbaseES V008R006C008M030B0010 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-28), 64-bit
.......
..................................... done
server started
2、查看数据库进程
如下所示,默认启动数据库服务后,会启动7个lms进程:
[kingbase@node201 bin]$ ps -ef |grep kingbase
/opt/Kingbase/ES/V8/KESRealPro/V008R006C008M030B0010/Server/bin/kingbase -D /sharedata/data_nfs/kingbase/data
kingbase 18161 18153 0 14:54 ? 00:00:00 kingbase: logger
kingbase 18162 18153 0 14:54 ? 00:00:00 kingbase: lmon
kingbase 18163 18153 0 14:54 ? 00:00:00 kingbase: lms 1
kingbase 18164 18153 0 14:54 ? 00:00:00 kingbase: lms 2
kingbase 18165 18153 0 14:54 ? 00:00:00 kingbase: lms 3
kingbase 18166 18153 0 14:54 ? 00:00:00 kingbase: lms 4
kingbase 18167 18153 0 14:54 ? 00:00:00 kingbase: lms 5
kingbase 18168 18153 0 14:54 ? 00:00:00 kingbase: lms 6
kingbase 18169 18153 0 14:54 ? 00:00:00 kingbase: lms 7
kingbase 18590 18153 0 14:55 ? 00:00:00 kingbase: checkpointer
kingbase 18591 18153 0 14:55 ? 00:00:00 kingbase: background writer
kingbase 18592 18153 0 14:55 ? 00:00:00 kingbase: global deadlock checker
kingbase 18593 18153 0 14:55 ? 00:00:00 kingbase: transaction syncer
kingbase 18594 18153 0 14:55 ? 00:00:00 kingbase: walwriter
kingbase 18595 18153 0 14:55 ? 00:00:00 kingbase: autovacuum launcher
kingbase 18596 18153 0 14:55 ? 00:00:00 kingbase: stats collector
kingbase 18597 18153 0 14:55 ? 00:00:00 kingbase: kwr collector
kingbase 18598 18153 0 14:55 ? 00:00:00 kingbase: ksh writer
kingbase 18599 18153 0 14:55 ? 00:00:00 kingbase: ksh collector
kingbase 18600 18153 0 14:55 ? 00:00:00 kingbase: logical replication launcher
# 数据库服务端口
[root@node201 KingbaseHA]# netstat -antlp |grep 54
tcp 0 0 0.0.0.0:54321 0.0.0.0:* LISTEN 2710/kingbase
tcp 0 0 192.168.1.201:54322 0.0.0.0:* LISTEN 2720/kingbase: lms
tcp 0 0 192.168.1.201:54323 0.0.0.0:* LISTEN 2721/kingbase: lms
tcp 0 0 192.168.1.201:54324 0.0.0.0:* LISTEN 2722/kingbase: lms
tcp 0 0 192.168.1.201:54325 0.0.0.0:* LISTEN 2723/kingbase: lms
tcp 0 0 192.168.1.201:54326 0.0.0.0:* LISTEN 2724/kingbase: lms
tcp 0 0 192.168.1.201:54327 0.0.0.0:* LISTEN 2725/kingbase: lms
tcp 0 0 192.168.1.201:54328 0.0.0.0:* LISTEN 2726/kingbase: lms
tcp 0 0 192.168.1.201:54325 192.168.1.202:46262 ESTABLISHED 2723/kingbase: lms
tcp 0 0 192.168.1.201:54326 192.168.1.202:14635 ESTABLISHED 2724/kingbase: lms
tcp 0 0 192.168.1.201:54322 192.168.1.202:35969 ESTABLISHED 2720/kingbase: lms
tcp 0 0 192.168.1.201:54324 192.168.1.202:11226 ESTABLISHED 2722/kingbase: lms
tcp 0 0 192.168.1.201:54323 192.168.1.202:25151 ESTABLISHED 2721/kingbase: lms
tcp 0 0 192.168.1.201:54328 192.168.1.202:51058 ESTABLISHED 2726/kingbase: lms
tcp 0 0 192.168.1.201:54327 192.168.1.202:53133 ESTABLISHED 2725/kingbase: lms
tcp6 0 0 :::54321 :::* LISTEN 2710/kingbase
3、访问数据库
# 当前节点:
[kingbase@node201 bin]$ ./ksql -U system test
Type "help" for help.
test=# create database prod;
CREATE DATABASE
test=# \c prod
You are now connected to database "prod" as userName "system".
prod=# create table t1 (id int ,name varchar(20));
CREATE TABLE
prod=# insert into t1 values (generate_series(1,1000),'usr'||generate_series(1,1000));
INSERT 0 1000
prod=# select count(*) from t1;
count
-------
1000
(1 row)
# 另外的节点:
[kingbase@node202 bin]$ ./ksql -U system test
Type "help" for help.
test=# \c prod
You are now connected to database "prod" as userName "system".
prod=# select count(*) from t1;
count
-------
1000
(1 row)
五、查看相关的服务
# pacemaker进程状态
[root@node201 KingbaseHA]# /etc/init.d/pacemaker status
pacemakerd (pid 5075) is running...
# corosync进程状态
[root@node201 KingbaseHA]# /etc/init.d/corosync status
corosync (pid 5043) is running...
# Corosync-qdevice进程状态
[root@node201 KingbaseHA]# /etc/init.d/corosync-qdevice status
corosync-qdevice is stopped
# 节点状态
[root@node201 KingbaseHA]# crm_mon -1|grep -E "offline|standby|Online"
* Online: [ node201 node202 ]
# 资源状态
[root@node201 KingbaseHA]# crm resource show|grep failed
# 查看集群是否有quorum
[root@node201 KingbaseHA]# corosync-quorumtool -s|grep Quorate
Quorate: Yes
Flags: Quorate Qdevice
# disk模式master_id
[root@node201 KingbaseHA]# /corosync-qdevice-tool -sv -p /opt/KingbaseHA/corosync-qdevice/var/run/corosync-qdevice/corosync-qdevice.sock|grep "master id" | awk -F':' '{print $2}'
-bash: /corosync-qdevice-tool: No such file or directory
# 存储STATE
[root@node201 KingbaseHA]# crm storage|grep UNMOUNT|wc -l
INFO: Show the storage usage
WARNING: not had managed filesystem(fs_resources_names:[])
WARNING: not filesystem in managed
ERROR: get votedisk lable information() failed(errno:255)
ERROR: get votedisk infomation failed(255)
0
# 存储容量
[root@node201 KingbaseHA]# crm storage|grep -v -E "VOTEDISK|PATH" | awk '{print (($4/$5) > 0.8)}' | grep 1|wc -l
INFO: Show the storage usage
WARNING: not had managed filesystem(fs_resources_names:[])
WARNING: not filesystem in managed
ERROR: get votedisk lable information() failed(errno:255)
ERROR: get votedisk infomation failed(255)
0
六、删除集群
1、清除集群配置(all nodes)
[root@node201 KingbaseHA]# ./cluster_manager.sh --clean_all
clean all start
Signaling Pacemaker Cluster Manager to terminate[ OK ]
Waiting for cluster services to unload.[ OK ]
Signaling Corosync Cluster Engine (corosync) to terminate: [ OK ]
Waiting for corosync services to unload:..[ OK ]
clean env variable start
clean env variable success
clean host start
clean host success
remove pacemaker daemon user start
remove pacemaker daemon user success
clean all success
# 查看集群状态
[root@node201 KingbaseHA]# crm status
Error: cluster is not available on this node
ERROR: status: crm_mon (rc=102):
[root@node201 KingbaseHA]# crm configure show
ERROR: running cibadmin -Ql: Connection to the CIB manager failed: Transport endpoint is not connected
Init failed, could not perform requested operations
ERROR: configure: Missing requirements
2、卸载集群部署
Tips:
执行uninstall命令,将会删除整个集群安装的目录,即install_dir变量指定的路径。
该命令不会清除已经初始化的kingbase的data目录,如果需要清除数据目录,
需要手动挂载目录,并清除。
[root@node201 KingbaseHA]# ./cluster_manager.sh --uninstall
uninstall start
./cluster_manager.sh: line 1276: /etc/init.d/pacemaker: No such file or directory
./cluster_manager.sh: line 1335: /etc/init.d/corosync-qdevice: No such file or directory
./cluster_manager.sh: line 1148: /etc/init.d/corosync: No such file or directory
clean env variable start
clean env variable success
clean host start
clean host success
remove pacemaker daemon user start
userdel: user 'hacluster' does not exist
groupdel: group 'haclient' does not exist
remove pacemaker daemon user success
uninstall success
# /opt/KingbaseHA目录被清除,数据库data目录需要手工清理
[root@node201 KingbaseHA]# pwd
/opt/KingbaseHA
[root@node201 KingbaseHA]# ls
[root@node201 KingbaseHA]# rm -rf /sharedata/data_nfs/*
[root@node201 KingbaseHA]# ls -lh /sharedata/data_nfs/
total 0
七、附件
1、基础组件初始化错误
1)qdisk错误
# 执行脚本失败
[root@node201 KingbaseHA]# ./cluster_manager.sh --base_configure_init
qdisk disabled when qdisk_fence enabled, please check 'enable_qdisk'
# NAS环境需开启qdisk(enable_qdisk=1,与enable_qdisk_fence=1保持一致)
[root@node201 KingbaseHA]# cat cluster_manager.conf |grep qdisk
enable_qdisk=1
2)节点名(node_name)配置错误
# 执行脚本失败
[root@node201 KingbaseHA]# ./cluster_manager.sh --base_configure_init
init kernel soft watchdog start
init kernel soft watchdog success
config host start
Host entry 192.168.1.201 node1 found, skiping...
Host entry 192.168.1.202 node2 found, skiping...
config host success
add env varaible in /root/.bashrc
add env variable success
config corosync.conf start
config corosync.conf success
Starting Corosync Cluster Engine (corosync): [FAILED]
# 查看系统message日志
#cat /var/log/messages:
......
Jul 30 17:55:56 node201 corosync[28803]: Can't read file /corosync/etc/corosync/corosync.conf: No such file or directory
Jul 30 18:00:01 node201 systemd: Started Session 115 of user root.
Jul 30 18:01:01 node201 systemd: Started Session 116 of user root.
Jul 30 18:02:13 node201 corosync[29256]: [MAIN ] Corosync Cluster Engine 3.0.4.5-a4dc starting up
Jul 30 18:02:13 node201 corosync[29256]: [MAIN ] Corosync built-in features: pie relro bindnow
Jul 30 18:02:13 node201 corosync[29256]: [MAIN ] failed to parse node address 'node2'
Jul 30 18:02:13 node201 corosync[29256]: [MAIN ] Corosync Cluster Engine exiting with status 8 at main.c:1377.
# 将node_name配置和/etc/hosts配置一致
[root@node201 KingbaseHA]# cat cluster_manager.conf |grep node
##cluster node information
node_name=(node201 node202)
node_ip=(192.168.1.201 192.168.1.202)
3)关闭enable_fence
如下所示,脚本执行失败,查看message日志,qdisk_fenced故障:
#查看系统message日志
Jul 30 18:04:36 node201 qdisk-fenced[30118]: qdisk_validate: open of /dev/block/11:0 for RDWR failed: No medium found
Jul 30 18:04:36 node201 qdisk-fenced[30118]: can't find disk label kcluster
Jul 30 18:04:36 node201 kernel: traps: qdisk-fenced[30118] trap stack segment ip:7fc808ad74bc sp:7ffef9628b20 error:0 in libc-2.17.so[7fc808a57000+1c4000]
Jul 30 18:04:36 node201 abrt-hook-ccpp: Process 30118 (qdisk-fenced) of user 0 killed by SIGBUS - dumping core
Jul 30 18:04:37 node201 abrt-server: Executable '/opt/KingbaseHA/corosync-qdevice/sbin/qdisk-fenced' doesn't belong to any package and ProcessUnpackaged is set to 'no'
Jul 30 18:04:37 node201 abrt-server: 'post-create' on '/var/spool/abrt/ccpp-2024-07-30-18:04:36-30118' exited with 1
# 配置enable_fence
[root@node201 KingbaseHA]# cat cluster_manager.conf |grep fence
################# fence #################
##If use fence, set 'enable_fence' to 1.
#If set 'enable_fence' to 1, you should set one of the fence method 'enable_esxi', 'enable_ipmi'
#or 'enable_qdisk_fence' to 1 as required. If set 'enable_fence' to 0, we consider you do NOT need
#fence and regard 'enable_esxi', 'enable_ipmi' and 'enable_qdisk_fence' as 0 automatically.
enable_fence=0
4)数据库实例启动失败
如下所示,系统内核参数配置错误,导致数据库实例启动失败:
# 数据库服务启动失败
[kingbase@node201 bin]$ ./sys_ctl -D /sharedata/data_nfs/kingbase/data start -t 180 -w
waiting for server to start....2024-07-31 14:44:46.072 CST [10870] LOG: config the real archive_command string as soon as possible to archive WAL files
2024-07-31 14:44:46.083 CST [10870] LOG: sepapower extension initialized
2024-07-31 14:44:46.087 CST [10870] LOG: starting KingbaseES V008R006C008M030B0010 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-28), 64-bit
......
2024-07-31 14:44:46.204 CST [10870] HINT: Future log output will appear in directory "sys_log/1".
................................................................................................................................................................................... stopped waiting
sys_ctl: server did not start in time
# 查看sys_log日志:
[kingbase@node201 1]$ cat kingbase-2024-07-31_143553.log
2024-07-31 14:35:53.161 CST [4605] LOG: listening on IPv4 address "192.168.1.201", port 54327
2024-07-31 14:35:53.162 CST [4606] LOG: listening on IPv4 address "192.168.1.201", port 54328
2024-07-31 14:35:53.163 CST [4602] LOG: listening on IPv4 address "192.168.1.201", port 54324
2024-07-31 14:35:53.164 CST [4603] LOG: listening on IPv4 address "192.168.1.201", port 54325
2024-07-31 14:35:53.166 CST [4600] LOG: listening on IPv4 address "192.168.1.201", port 54322
2024-07-31 14:35:53.167 CST [4604] LOG: listening on IPv4 address "192.168.1.201", port 54326
2024-07-31 14:35:53.168 CST [4601] LOG: listening on IPv4 address "192.168.1.201", port 54323
# 检查内核参数配置
[root@node201 KingbaseHA]# cat /etc/sysctl.conf
.......
kernel.sem= 5010 641280 5010 256
kernel.shmall = 18446744073692774399
kernel.shmmax = 18446744073692774399
kernel.shmmax = 4294967
# 将shm配置注释后启动成功
[root@node201 KingbaseHA]# cat /etc/sysctl.conf
.......
kernel.sem= 5010 641280 5010 256
# kernel.shmall = 18446744073692774399
# kernel.shmmax = 18446744073692774399
# kernel.shmmax = 4294967
5)清理资源配置
# 查看资源配置
[root@node201 KingbaseHA]# crm configure show
ta" kb_dba=kingbase kb_host=0.0.0.0 kb_user=system kb_port=54321 kb_db=template1 logfile="/opt/Kingbase/ES/V8/Server/log/kingbase1.log" \
op start interval=0 timeout=120 \
op stop interval=0 timeout=120 \
op monitor interval=9s timeout=30 on-fail=stop \
meta failure-timeout=5min
primitive DB1 ocf:kingbase:kingbase \
params sys_ctl="/opt/Kingbase/ES/V8/Server/bin/sys_ctl" ksql="/opt/Kingbase/ES/V8/Server/bin/ksql" sys_isready="/opt/Kingbase/ES/V8/Server/bin/sys_isready" kb_data="/sharedata/data_nfs/kingbase/data" kb_dba=kingbase kb_host=0.0.0.0 kb_user=system kb_port=54321 kb_db=template1 logfile="/opt/Kingbase/ES/V8/log/kingbase1.log" \
op start interval=0 timeout=120 \
op stop interval=0 timeout=120 \
op monitor interval=9s timeout=30 on-fail=stop \
meta failure-timeout=5min
clone clone-DB DB \
meta interleave=true target-role=Started
clone clone-DB1 DB1 \
meta target-role=Stopped
property cib-bootstrap-options: \
have-watchdog=false \
dc-version=2.0.3-4b1f869f0f \
cluster-infrastructure=corosync \
cluster-name=kcluster \
no-quorum-policy=freeze \
stonith-enabled=false \
load-threshold="0%"
# 清理资源配置
[root@node201 KingbaseHA]# crm configure erase
WARNING: resource DB is running, can't delete it
ERROR: CIB erase aborted (nothing was deleted)
# 关闭数据库
[kingbase@node201 bin]$ ./sys_ctl -D /sharedata/data_nfs/kingbase/data stop
waiting for server to shut down........... done
server stopped
# 清理资源配置
[root@node201 KingbaseHA]# crm configure erase
[root@node201 KingbaseHA]# crm configure show
node 1: node201
node 2: node202
# 配置数据库等资源
[root@node201 KingbaseHA]# ./cluster_manager.sh --config_rac_resource
crm configure DB resource start
crm configure DB resource end
# 查看资源配置
[root@node201 KingbaseHA]# crm configure show
node 1: node201
node 2: node202
primitive DB ocf:kingbase:kingbase \
params sys_ctl="/opt/Kingbase/ES/V8/Server/bin/sys_ctl" ksql="/opt/Kingbase/ES/V8/Server/bin/ksql" sys_isready="/opt/Kingbase/ES/V8/Server/bin/sys_isready" kb_data="/sharedata/data_nfs/kingbase/data" kb_dba=kingbase kb_host=0.0.0.0 kb_user=system kb_port=54321 kb_db=template1 logfile="/opt/Kingbase/ES/V8/Server/log/kingbase1.log" \
op start interval=0 timeout=120 \
op stop interval=0 timeout=120 \
op monitor interval=9s timeout=30 on-fail=stop \
meta failure-timeout=5min
clone clone-DB DB \
meta interleave=true target-role=Started
property cib-bootstrap-options: \
no-quorum-policy=freeze \
stonith-enabled=false \
load-threshold="0%"
八、总结
使用NAS作为共享存储部署KingbaseES RAC架构相对比较简单,但一般用于测试环境,不建议用作生产环境。