
Kubernetes (Part 5)


1. Running a Redis Cluster on a StatefulSet Controller

1.1 Build the Redis image

Write the Dockerfile:

#Redis Image
FROM harbor.chu.net/baseimages/centos-base:7.9.2009 

ADD redis-4.0.14.tar.gz /usr/local/src
RUN ln -sv /usr/local/src/redis-4.0.14 /usr/local/redis && cd /usr/local/redis && make && cp src/redis-cli /usr/sbin/ && cp src/redis-server  /usr/sbin/ && mkdir -pv /data/redis-data 
ADD redis.conf /usr/local/redis/redis.conf 
ADD run_redis.sh /usr/local/redis/run_redis.sh
RUN chmod a+x /usr/local/redis/run_redis.sh

EXPOSE 6379

CMD ["/usr/local/redis/run_redis.sh"

Build the image and push it to the local registry:

nerdctl build -t harbor.chu.net/web/redis:4.0.14 .
nerdctl push harbor.chu.net/web/redis:4.0.14

1.2 Test the Redis image

# Create a container
[root@k8s-master2 redis]#nerdctl run -d -p 6379:6379 harbor.chu.net/web/redis:4.0.14 
974c83b816797ff46deb6ad7f00b2dc33beb0581c4ae2c0f71c6bc7aceee6728

# Exec into the container and test
[root@k8s-master2 redis]#nerdctl exec -it 974c83b81679 bash
[root@974c83b81679 /]# redis-cli -h 127.0.0.1 -p 6379 -a 123456
Warning: Using a password with '-a' option on the command line interface may not be safe.
127.0.0.1:6379> set key1 value1
OK
127.0.0.1:6379> get key1
"value1"

1.3 Prepare the storage environment

# NFS server: 10.0.0.101
apt install -y nfs-server

mkdir -p /data/k8sdata/web/{redis0,redis1,redis2,redis3,redis4,redis5}

echo "/data/k8sdata *(rw,sync,no_root_squash,no_subtree_check)" >> /etc/exports

exportfs -r
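
Before going further, confirm the export is visible from a Kubernetes node (a minimal check, assuming the NFS client utilities are installed on the node):

# Run on any k8s node; should list /data/k8sdata
showmount -e 10.0.0.101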

1.4 Create the PVs

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv0
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis0

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv1
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis1

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv2
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis2

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv3
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis3

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv4
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis4

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv5
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis5

Apply the manifest and verify the PVs:

[root@k8s-master2 pv]#kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                         STORAGECLASS   REASON   AGE
redis-cluster-pv0        5Gi        RWO            Retain           Available                                                         6s
redis-cluster-pv1        5Gi        RWO            Retain           Available                                                         6s
redis-cluster-pv2        5Gi        RWO            Retain           Available                                                         6s
redis-cluster-pv3        5Gi        RWO            Retain           Available                                                         6s
redis-cluster-pv4        5Gi        RWO            Retain           Available                                                         6s
redis-cluster-pv5        5Gi        RWO            Retain           Available                                                         6s

1.5 Deploy the Redis Cluster

1.5.1 Prepare the Redis configuration file

#cat redis.conf 
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379

Create the configmap:

#kubectl create configmap redis-conf --from-file=redis.conf -n web
configmap/redis-conf created

# Inspect the configmap
#kubectl describe cm redis-conf -n web
Name:         redis-conf
Namespace:    web
Labels:       <none>
Annotations:  <none>

Data
====
redis.conf:
----
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379


BinaryData
====

Events:  <none>

1.5.2 Create the Redis Cluster

apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: web
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis
    port: 6379
  clusterIP: None			# headless service
  
---
apiVersion: v1
kind: Service
metadata:
  name: redis-access
  namespace: web
  labels:
    app: redis
spec:
  selector:
    app: redis
    appCluster: redis-cluster
  ports:
  - name: redis-access
    protocol: TCP
    port: 6379
    targetPort: 6379

---
apiVersion: apps/v1
kind: StatefulSet					# StatefulSet controller
metadata:
  name: redis
  namespace: web
spec:
  serviceName: redis
  replicas: 6
  selector:
    matchLabels:
      app: redis
      appCluster: redis-cluster
  template:
    metadata:
      labels:
        app: redis
        appCluster: redis-cluster
    spec:
      #terminationGracePeriodSeconds: 20		# grace period before the pod is killed
      #affinity:
      #  podAntiAffinity:
      #    preferredDuringSchedulingIgnoredDuringExecution:
      #    - weight: 100
      #      podAffinityTerm:
      #        labelSelector:
      #          matchExpressions:
      #          - key: app
      #            operator: In
      #            values:
      #            - redis
      #        topologyKey: kubernetes.io/hostname
      containers:
      - name: redis
        image: harbor.chu.net/web/redis:4.0.14
        command:
          - "redis-server"
        args:
          - "/etc/redis/redis.conf"
          - "--protected-mode"
          - "no"
        resources:
          requests:
            cpu: "500m"
            memory: "500Mi"
        ports:
        - containerPort: 6379
          name: redis
          protocol: TCP
        - containerPort: 16379
          name: cluster
          protocol: TCP
        volumeMounts:
        - name: conf
          mountPath: /etc/redis
        - name: data
          mountPath: /var/lib/redis
      volumes:
      - name: conf
        configMap:				#configmap
          name: redis-conf
          items:
          - key: redis.conf
            path: redis.conf
  volumeClaimTemplates:				# PVC template
  - metadata:
      name: data
      namespace: web
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 5Gi

volumeClaimTemplates

Stateful workloads need persistent storage, and in a distributed system each member's data differs, so the replicas cannot share a single volume: every pod needs its own dedicated storage. A volume defined in a Deployment's pod template is shared by all replicas, because each replica is stamped from the same template. A StatefulSet therefore does not declare per-pod volumes in the pod template; instead it uses volumeClaimTemplates, a claim template that generates a separate PVC for each pod and binds it to a PV, giving every pod its own storage.

One PVC is created per pod from the volumeClaimTemplates, named (volumeClaimTemplates.name)-(statefulSet.name)-(ordinal), with ordinals starting at 0. Here volumeClaimTemplates.name=data and statefulSet.name=redis, so the PVCs are data-redis-0, data-redis-1, data-redis-2, and so on.

The controller looks for Available PVs of sufficient size and binds each PVC to one of them in no particular order, for example:

[root@k8s-deploy redis-cluster]#kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                         STORAGECLASS   REASON   AGE
redis-cluster-pv0        5Gi        RWO            Retain           Bound       web/data-redis-3                                      8m48s
redis-cluster-pv1        5Gi        RWO            Retain           Available                                                         8m48s
redis-cluster-pv2        5Gi        RWO            Retain           Bound       web/data-redis-0                                      8m48s
redis-cluster-pv3        5Gi        RWO            Retain           Bound       web/data-redis-1                                      8m48s
redis-cluster-pv4        5Gi        RWO            Retain           Available                                                         8m48s
redis-cluster-pv5        5Gi        RWO            Retain           Bound       web/data-redis-2                                      8m48s
redis-cluster-pv6        5Gi        RWO            Retain           Available                                                         2m45s
redis-cluster-pv7        5Gi        RWO            Retain           Available                                                         2m45s
redis-cluster-pv8        5Gi        RWO            Retain           Available                                                         2m45s
redis-cluster-pv9        5Gi        RWO            Retain           Bound       web/data-redis-5                                      2m45s
test-pv1                 5Gi        RWO            Retain           Bound       web/data-redis-4                                      2m45s
test-pv2                 5Gi        RWO            Retain           Available                                                         2m45s
test-pv3                 5Gi        RWO            Retain           Available                                                         2m45s
test-pv4                 5Gi        RWO            Retain           Available                                                         2m45s
test-pv5                 5Gi        RWO            Retain           Available                                                         2m45s
test-pv6                 5Gi        RWO            Retain           Available                                                         2m45s
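
If deterministic binding matters, the claims can be fenced in with a dedicated storageClassName — a hypothetical fragment (the class name redis-cluster is made up for illustration); the same value would also go into volumeClaimTemplates.spec.storageClassName:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: redis-cluster-pv0
spec:
  storageClassName: redis-cluster    # claims requesting this class bind only within this PV set
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/redis0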

1.5.3 Ordered creation

# Pods are created one at a time, in order
[root@k8s-master2 redis-cluster]#kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
redis-0                                       1/1     Running   0          96s
redis-1                                       1/1     Running   0          94s
redis-2                                       0/1     Pending   0          1s


[root@k8s-master2 redis-cluster]#kubectl get pod -n web
NAME                                          READY   STATUS              RESTARTS   AGE
redis-0                                       1/1     Running             0          112s
redis-1                                       1/1     Running             0          110s
redis-2                                       1/1     Running             0          17s
redis-3                                       1/1     Running             0          13s
redis-4                                       0/1     ContainerCreating   0          0s

All pods created:

[root@k8s-master2 redis-cluster]#kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
redis-0                                       1/1     Running   0          119s
redis-1                                       1/1     Running   0          117s
redis-2                                       1/1     Running   0          24s
redis-3                                       1/1     Running   0          20s
redis-4                                       1/1     Running   0          7s
redis-5                                       1/1     Running   0          5s

1.5.4 Verify the PVC status

[root@k8s-master2 redis-cluster]#kubectl get pvc -n web
NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-redis-0              Bound    redis-cluster-pv1        5Gi        RWO                           8m52s
data-redis-1              Bound    redis-cluster-pv2        5Gi        RWO                           8m48s
data-redis-2              Bound    redis-cluster-pv0        5Gi        RWO                           3m34s
data-redis-3              Bound    redis-cluster-pv4        5Gi        RWO                           3m30s
data-redis-4              Bound    redis-cluster-pv3        5Gi        RWO                           3m17s
data-redis-5              Bound    redis-cluster-pv5        5Gi        RWO                           3m15s

1.6 Initialize the Redis Cluster

Initialization only needs to be done once. Redis 4 and earlier are initialized with the redis-trib tool; from Redis 5 onward, redis-cli handles it.
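
For reference, on Redis 5+ the same three-master, three-replica topology could be created in a single redis-cli command — a hedged sketch assuming the pod DNS names resolve as in the steps below (this deployment runs Redis 4, so redis-trib is used instead):

# Redis 5+ only, shown for comparison; one replica per master
redis-cli --cluster create \
`dig +short redis-0.redis.web.svc.cluster.local`:6379 \
`dig +short redis-1.redis.web.svc.cluster.local`:6379 \
`dig +short redis-2.redis.web.svc.cluster.local`:6379 \
`dig +short redis-3.redis.web.svc.cluster.local`:6379 \
`dig +short redis-4.redis.web.svc.cluster.local`:6379 \
`dig +short redis-5.redis.web.svc.cluster.local`:6379 \
--cluster-replicas 1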

# Create an init pod
[root@k8s-deploy ~]#kubectl run -it ubuntu1804 --image=ubuntu:18.04 --restart=Never -n web bash
apt update
apt install -y python2.7 python-pip redis-tools dnsutils iputils-ping net-tools
pip install --upgrade pip	# upgrade pip first if installing redis-trib fails
pip install redis-trib==0.5.1

# Create the cluster (three masters)
redis-trib.py create \
`dig +short redis-0.redis.web.svc.cluster.local`:6379 \
`dig +short redis-1.redis.web.svc.cluster.local`:6379 \
`dig +short redis-2.redis.web.svc.cluster.local`:6379

# Make redis-3 a replica of redis-0
redis-trib.py replicate \
--master-addr `dig +short redis-0.redis.web.svc.cluster.local`:6379 \
--slave-addr `dig +short redis-3.redis.web.svc.cluster.local`:6379

# Make redis-4 a replica of redis-1
redis-trib.py replicate \
--master-addr `dig +short redis-1.redis.web.svc.cluster.local`:6379 \
--slave-addr `dig +short redis-4.redis.web.svc.cluster.local`:6379

# Make redis-5 a replica of redis-2
redis-trib.py replicate \
--master-addr `dig +short redis-2.redis.web.svc.cluster.local`:6379 \
--slave-addr `dig +short redis-5.redis.web.svc.cluster.local`:6379

FQDN format: $(pod name).$(service name).$(namespace).svc.cluster.local
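
A quick way to confirm those FQDNs resolve (run from the ubuntu1804 pod above, which has dig installed):

# Each name should return the pod IP of the corresponding cluster member
for i in 0 1 2 3 4 5; do
  dig +short redis-$i.redis.web.svc.cluster.local
done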

1.7 Verify the Redis Cluster

Cluster state:

[root@k8s-master2 redis-cluster]#kubectl exec -it redis-0 bash -n web
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@redis-0 /]# redis-cli 
127.0.0.1:6379> CLUSTER INFO
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:5
cluster_my_epoch:2
cluster_stats_messages_ping_sent:453
cluster_stats_messages_pong_sent:474
cluster_stats_messages_meet_sent:3
cluster_stats_messages_sent:930
cluster_stats_messages_ping_received:472
cluster_stats_messages_pong_received:456
cluster_stats_messages_meet_received:2
cluster_stats_messages_received:930

Cluster nodes:

127.0.0.1:6379> CLUSTER NODES
0c3d2b0c37ea75f44c3a9cf8be2bfed459ce8e1a 10.200.107.194:6379@16379 myself,master - 0 1675518521000 2 connected 10923-16383
5b7b529ffd7139780f96b7fe8283bc111e00060c 10.200.135.230:6379@16379 slave 7418f032450f701afb2d5d827570e9dbe500c5b9 0 1675518522000 5 connected
fea6e1d8b6a89a5805fa7689355f9deb3015eda1 10.200.159.129:6379@16379 master - 0 1675518522681 0 connected 5462-10922
7418f032450f701afb2d5d827570e9dbe500c5b9 10.200.224.1:6379@16379 master - 0 1675518522581 1 connected 0-5461
75e2f99310e99cd01e9fb63878204b497420631c 10.200.135.229:6379@16379 slave 0c3d2b0c37ea75f44c3a9cf8be2bfed459ce8e1a 0 1675518522000 3 connected
1926b17537d369f3c4b4cfd00609c374ef9aa632 10.200.224.2:6379@16379 slave fea6e1d8b6a89a5805fa7689355f9deb3015eda1 0 1675518522581 4 connected

Write data on a master:

[root@k8s-master2 redis-cluster]#kubectl exec -it redis-1 bash -n web
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@redis-1 /]# redis-cli 
127.0.0.1:6379> set key1 value1
OK

Verify the data on its slave:

[root@k8s-master2 redis-cluster]#kubectl exec -it redis-4 bash -n web
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
[root@redis-4 /]# redis-cli 
127.0.0.1:6379> keys *
1) "key1"

2. Running a One-Master-Multi-Slave MySQL on a StatefulSet Controller

2.1 Prepare the images

https://github.com/docker-library/official-images

Prepare the base images:

# xtrabackup image
docker pull registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0
docker tag registry.cn-hangzhou.aliyuncs.com/hxpdocker/xtrabackup:1.0 harbor.chu.net/baseimages/xtrabackup:1.0
docker push harbor.chu.net/baseimages/xtrabackup:1.0

# mysql image
docker pull mysql:5.7.36
docker tag mysql:5.7.36 harbor.chu.net/baseimages/mysql:5.7.36
docker push harbor.chu.net/baseimages/mysql:5.7.36

2.2 Create the PVs

The PVCs are created automatically against the PVs; all that is needed is enough Available PVs. The PV count determines how many mysql pods can be started: six PVs are created here, so at most six mysql pods can run.

2.2.1 Create the data directories

# Create the data directories on the NFS server (10.0.0.101)
#apt install -y nfs-server
mkdir -p /data/k8sdata/web/{mysql-datadir-1,mysql-datadir-2,mysql-datadir-3,mysql-datadir-4,mysql-datadir-5,mysql-datadir-6}

#echo "/data/k8sdata *(rw,sync,no_root_squash,no_subtree_check)" >> /etc/exports

#exportfs -r

2.2.2 Create the PVs

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-1
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-1 
    server: 10.0.0.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-2
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-2
    server: 10.0.0.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-3
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-3
    server: 10.0.0.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-4
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-4
    server: 10.0.0.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-5
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-5
    server: 10.0.0.101
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-datadir-6
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    path: /data/k8sdata/web/mysql-datadir-6
    server: 10.0.0.101

Apply the manifest and verify the PVs:

[root@k8s-deploy pv]#kubectl apply -f mysql-persistentvolume.yaml
[root@k8s-deploy pv]#kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                         STORAGECLASS   REASON   AGE
mysql-datadir-1          50Gi       RWO            Retain           Available                                                         2s
mysql-datadir-2          50Gi       RWO            Retain           Available                                                         2s
mysql-datadir-3          50Gi       RWO            Retain           Available                                                         2s
mysql-datadir-4          50Gi       RWO            Retain           Available                                                         2s
mysql-datadir-5          50Gi       RWO            Retain           Available                                                         2s
mysql-datadir-6          50Gi       RWO            Retain           Available                                                         2s

2.3 Run the MySQL service

2.3.1 Create the configmap

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: web
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1

Verify the configmap:

[root@k8s-deploy mysql]#kubectl get cm -n web
NAME               DATA   AGE
kube-root-ca.crt   1      3d13h
mysql              2      6s
redis-conf         1      14h

[root@k8s-deploy mysql]#kubectl describe cm mysql -n web
Name:         mysql
Namespace:    web
Labels:       app=mysql
Annotations:  <none>

Data
====
slave.cnf:
----
# Apply this config only on slaves.
[mysqld]
super-read-only
log_bin_trust_function_creators=1

master.cnf:
----
# Apply this config only on the master.
[mysqld]
log-bin
log_bin_trust_function_creators=1
lower_case_table_names=1


BinaryData
====

Events:  <none>

2.3.2 Create the services

# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  namespace: web
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None		# headless service
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  namespace: web
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

Verify the services:

[root@k8s-deploy mysql]#kubectl apply -f mysql-services.yaml 
service/mysql created
service/mysql-read created
[root@k8s-deploy mysql]#kubectl get svc -n web
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
mysql        ClusterIP   None             <none>        3306/TCP                                       12s
mysql-read   ClusterIP   10.100.103.71    <none>        3306/TCP                                       12s

2.3.3 Create the MySQL StatefulSet

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: web
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: harbor.chu.net/baseimages/mysql:5.7.36 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: harbor.chu.net/baseimages/xtrabackup:1.0 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: harbor.chu.net/baseimages/mysql:5.7.36 
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: harbor.chu.net/baseimages/xtrabackup:1.0 
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi

Ordered creation:

[root@k8s-deploy mysql]#kubectl apply -f mysql-statefulset.yaml

[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS     AGE
mysql-0                       2/2     Running   0            62s
mysql-1                       1/2     Running   1 (4s ago)   32s

[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS     RESTARTS      AGE
mysql-0                       2/2     Running    0             75s
mysql-1                       2/2     Running    1 (17s ago)   45s
mysql-2                       0/2     Init:0/2   0             10s

All pods created:

[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS      AGE
mysql-0                       2/2     Running   0             2m36s
mysql-1                       2/2     Running   1 (98s ago)   2m6s
mysql-2                       2/2     Running   1 (64s ago)   91s
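
With all three pods Running, the mysql-read service can be exercised from a throwaway client pod — a minimal sketch using the same mysql image; @@server_id differs per pod (100 + ordinal), so repeated runs show reads being spread across instances:

# One-off client; reads go through the mysql-read ClusterIP
kubectl run mysql-client --image=harbor.chu.net/baseimages/mysql:5.7.36 \
  -n web -it --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT @@server_id, @@hostname"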

2.4 Verify the MySQL service

2.4.1 Check pod status in the dashboard

2.4.2 Verify MySQL master-slave replication

# List the pods
[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS        AGE
mysql-0                       2/2     Running   0               11m
mysql-1                       2/2     Running   1 (10m ago)     10m
mysql-2                       2/2     Running   1 (9m44s ago)   10m

# Exec into a mysql slave
[root@k8s-deploy mysql]#kubectl exec -it mysql-1 sh -n web
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulted container "mysql" out of: mysql, xtrabackup, init-mysql (init), clone-mysql (init)
# mysql							# start the mysql client
Welcome to the MySQL monitor.  Commands end with ; or \g.
mysql> show slave status\G				# check the slave status
*************************** 1. row ***************************
               Slave_IO_State: Waiting for master to send event
                  Master_Host: mysql-0.mysql
                  Master_User: root
                  Master_Port: 3306
                Connect_Retry: 10
              Master_Log_File: mysql-0-bin.000003
          Read_Master_Log_Pos: 154
               Relay_Log_File: mysql-1-relay-bin.000002
                Relay_Log_Pos: 322
        Relay_Master_Log_File: mysql-0-bin.000003
             Slave_IO_Running: Yes			# IO thread is Yes
            Slave_SQL_Running: Yes			# SQL thread is Yes
......
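
An end-to-end check of replication: write on the master and read the row back from a slave (a hedged sketch; root logins are passwordless here because of MYSQL_ALLOW_EMPTY_PASSWORD):

# Write on mysql-0 (the master) ...
kubectl -n web exec mysql-0 -c mysql -- mysql -e \
  "CREATE DATABASE IF NOT EXISTS repltest; CREATE TABLE IF NOT EXISTS repltest.t (id INT); INSERT INTO repltest.t VALUES (1);"

# ... and read it back on mysql-1 (a slave)
kubectl -n web exec mysql-1 -c mysql -- mysql -e "SELECT * FROM repltest.t;"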

2.4.3 Verify high availability

Delete the master pod and then a slave pod, and verify that the MySQL service returns to a normal state.

  1. Delete the MySQL master
# List the pods
[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS      AGE
mysql-0                       2/2     Running   0             68m
mysql-1                       2/2     Running   1 (67m ago)   67m
mysql-2                       2/2     Running   1 (66m ago)   67m

# Delete the master pod
[root@k8s-deploy mysql]#kubectl delete pod mysql-0 -n web
pod "mysql-0" deleted

# The master pod is recreated and back to normal
[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS      AGE
mysql-0                       2/2     Running   0             18s
mysql-1                       2/2     Running   1 (68m ago)   68m
mysql-2                       2/2     Running   1 (67m ago)   68m
  2. Delete a MySQL slave
# Delete slave 2
[root@k8s-deploy mysql]#kubectl delete pod mysql-2 -n web
pod "mysql-2" deleted

# The slave pod is recreated and back to normal
[root@k8s-deploy mysql]#kubectl get pod -n web
NAME                          READY   STATUS    RESTARTS      AGE
mysql-0                       2/2     Running   0             3m4s
mysql-1                       2/2     Running   1 (71m ago)   71m
mysql-2                       2/2     Running   0             29s

3. Single-Pod Multi-Container LNMP, with the MySQL Pods from Part 2 as the Database Server

The LNMP stack serves a WordPress blog built on Nginx + PHP, with Nginx and PHP running in the same pod.

WordPress site: https://cn.wordpress.org

3.1 Prepare the images

3.1.1 PHP image

  • Official image
docker pull php:5.6.40-fpm
docker tag php:5.6.40-fpm harbor.chu.net/baseimages/php:5.6.40-fpm
docker push harbor.chu.net/baseimages/php:5.6.40-fpm
  • Custom image

Configuration file:

[root@k8s-deploy php]#cat www.conf |egrep -v '^;|^\s*$'
[www]
user = nginx
group = nginx
listen = 0.0.0.0:9000
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
slowlog = /opt/remi/php56/root/var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /opt/remi/php56/root/var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
php_value[session.save_handler] = files
php_value[session.save_path]    = /opt/remi/php56/root/var/lib/php/session
php_value[soap.wsdl_cache_dir]  = /opt/remi/php56/root/var/lib/php/wsdlcache

Startup script:

[root@k8s-deploy php]#cat run_php.sh 
#!/bin/bash
/opt/remi/php56/root/usr/sbin/php-fpm
tail -f /etc/hosts	# hold the foreground so the container keeps running

Write the Dockerfile:

#PHP Base Image
FROM harbor.chu.net/baseimages/centos-base:7.9.2009
RUN yum install -y https://mirrors.tuna.tsinghua.edu.cn/remi/enterprise/remi-release-7.rpm && yum install -y php56-php-fpm php56-php-mysql
ADD www.conf /opt/remi/php56/root/etc/php-fpm.d/www.conf
#RUN useradd nginx -u 2019
ADD run_php.sh /usr/local/bin/run_php.sh
RUN chmod a+x /usr/local/bin/run_php.sh
EXPOSE 9000

CMD ["/usr/local/bin/run_php.sh"]

Build the image:

docker build -t harbor.chu.net/web/wordpress-php-5.6:v1 .
docker push  harbor.chu.net/web/wordpress-php-5.6:v1

3.1.2 Nginx image

Configuration file:

# nginx.conf
[root@k8s-deploy nginx]#egrep -v '^\s*#|^$' nginx.conf 
user  nginx nginx;
worker_processes  auto;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    client_max_body_size 10M;
    client_body_buffer_size 16k;
    client_body_temp_path  /usr/local/nginx/client_body_temp   1 2 2;
    gzip  on;
    server {
        listen       80;
        server_name  blogs.magedu.net;
        location / {
            root    /home/nginx/wordpress;
            index   index.php index.html index.htm;
        }
        location ~ \.php$ {
            root           /home/nginx/wordpress;
            fastcgi_pass   127.0.0.1:9000;
            fastcgi_index  index.php;
            fastcgi_param  SCRIPT_FILENAME  $document_root$fastcgi_script_name;
            include        fastcgi_params;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

Startup script:

[root@k8s-deploy nginx]#cat run_nginx.sh 
#!/bin/bash
#chown nginx:nginx /home/nginx/wordpress/ -R
/usr/local/nginx/sbin/nginx
tail -f /etc/hosts	# hold the foreground so the container keeps running

Write the Dockerfile:

Build reference for harbor.chu.net/baseimages/nginx-base:1.22.0: building the nginx base image[1]

FROM harbor.chu.net/baseimages/nginx-base:1.22.0 

ADD nginx.conf /usr/local/nginx/conf/nginx.conf
ADD run_nginx.sh /run_nginx.sh
RUN chmod a+x /run_nginx.sh && mkdir -p /home/nginx/wordpress && chown nginx:nginx /home/nginx/wordpress/ -R

EXPOSE 80 443

CMD ["/run_nginx.sh"]

Build the image:

docker build -t harbor.chu.net/web/wordpress-nginx-1.22.0:v1 .
docker push harbor.chu.net/web/wordpress-nginx-1.22.0:v1
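
An optional sanity check on the freshly built image (assumes the nginx symlink on PATH created in the base image build, see footnote [1]):

# Validate the baked-in nginx.conf without starting a pod
docker run --rm harbor.chu.net/web/wordpress-nginx-1.22.0:v1 nginx -t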

3.2 Run the WordPress site

The custom PHP image is used, and the WordPress content is stored on the backend NFS server.

# NFS:10.0.0.101
mkdir -p /data/k8sdata/web/wordpress

# Download and unpack WordPress
wget https://cn.wordpress.org/wordpress-6.0-zh_CN.tar.gz
tar xvf wordpress-6.0-zh_CN.tar.gz -C /data/k8sdata/web/

# Set ownership; the UID/GID matches the nginx user in the base image
chown -R 2088:2088 /data/k8sdata/web/wordpress

3.2.1 Run WordPress

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wordpress-app
  template:
    metadata:
      labels:
        app: wordpress-app
    spec:
      containers:
      - name: wordpress-app-nginx
        image: harbor.chu.net/web/wordpress-nginx-1.22.0:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      - name: wordpress-app-php
        image: harbor.chu.net/web/wordpress-php-5.6:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 9000
          protocol: TCP
          name: http
        volumeMounts:
        - name: wordpress
          mountPath: /home/nginx/wordpress
          readOnly: false

      volumes:
      - name: wordpress
        nfs:
          server: 10.0.0.101
          path: /data/k8sdata/web/wordpress


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wordpress-app
  name: wordpress-app-spec
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30041
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30043
  selector:
    app: wordpress-app

Verify:

[root@k8s-deploy wordpress]#kubectl get pod -n web
NAME                                        READY   STATUS    RESTARTS        AGE
mysql-0                                     2/2     Running   0               3h49m
mysql-1                                     2/2     Running   1 (4h57m ago)   4h57m
mysql-2                                     2/2     Running   0               3h46m
wordpress-app-deployment-66968c7775-4577b   2/2     Running   0               16s

[root@k8s-deploy wordpress]#kubectl get svc -n web
NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
mysql                ClusterIP   None             <none>        3306/TCP                                       5h18m
mysql-read           ClusterIP   10.100.103.71    <none>        3306/TCP                                       5h18m
wordpress-app-spec   NodePort    10.100.134.182   <none>        80:30041/TCP,443:30043/TCP                     36s

3.2.2 Create a PHP test page

# Created on the backend NFS server
[root@harbor1 ~]#cat /data/k8sdata/web/wordpress/test.php 
<?php
    phpinfo();
?>

[root@harbor1 ~]#chown 2088:2088 /data/k8sdata/web/wordpress/test.php 

3.2.3 Access the PHP test page
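
The page was checked in a browser; the command-line equivalent (assuming 10.0.0.41 is one of the node IPs, as used in 3.3.3 below, and NodePort 30041 from the service above):

# Expect the phpinfo() HTML back through the NodePort
curl -s http://10.0.0.41:30041/test.php | head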

3.3 Initialize WordPress

The MySQL service from Part 2 is used as the database server.

3.3.1 Create the database in MySQL

  • Find the mysql pod names
[root@k8s-deploy nginx]#kubectl get pod -n web
NAME                                        READY   STATUS    RESTARTS        AGE
mysql-0                                     2/2     Running   0               4h14m
mysql-1                                     2/2     Running   1 (5h22m ago)   5h22m
mysql-2                                     2/2     Running   0               4h11m
wordpress-app-deployment-66968c7775-4577b   2/2     Running   0               25m
  • Exec into the mysql master pod, create the wordpress user, and grant privileges
# Exec into the mysql container
[root@k8s-deploy nginx]#kubectl exec -it mysql-0 bash -n web
# Log in to mysql
root@mysql-0:/# mysql
# Create the wordpress database
mysql> create database wordpress;
Query OK, 1 row affected (0.03 sec)

# Create the wordpress user (password: wordpress) and grant privileges
mysql> grant all privileges on wordpress.* to "wordpress"@"%" identified by "wordpress";
Query OK, 0 rows affected, 1 warning (0.02 sec)
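
A quick confirmation that the account and grant took effect (run from outside the pod; any mysql client session works equally well):

# List the account and its privileges
kubectl -n web exec mysql-0 -c mysql -- mysql -e \
  "SELECT user,host FROM mysql.user WHERE user='wordpress'; SHOW GRANTS FOR 'wordpress'@'%';"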

3.3.2 Test the MySQL connection

# Exec into a mysql slave container
[root@k8s-deploy nginx]#kubectl exec -it mysql-2 bash -n web

# Connect to the mysql master as the wordpress user
root@mysql-2:/# mysql -hmysql-0.mysql -uwordpress -pwordpress

# List the databases
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| wordpress          |
+--------------------+
2 rows in set (0.00 sec)

3.3.3 Initialize the database

  1. In a browser, open http://10.0.0.41:30041 and run the initialization from the web UI.

  2. The database host name format is: mysql pod name + service name (mysql-0.mysql here).

Note: if WordPress is not in the same namespace as MySQL, the database host must use the full service name, e.g. mysql-0.mysql.web.svc.cluster.local.

  3. Set the site title, admin user name, and so on.

  4. Finish the installation.

  5. Log in.

  6. Open the home page.

3.4 Verify the MySQL data

Check the data in both the MySQL master and a slave.

3.4.1 Master database

# Log in to the database
[root@k8s-deploy nginx]#kubectl exec -it mysql-0 mysql -n web
# Switch to the wordpress database
mysql> use wordpress;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
# List the tables in the wordpress database
mysql> show tables;
+-----------------------+
| Tables_in_wordpress   |
+-----------------------+
| wp_commentmeta        |
| wp_comments           |
| wp_links              |
| wp_options            |
| wp_postmeta           |
| wp_posts              |
| wp_term_relationships |
| wp_term_taxonomy      |
| wp_termmeta           |
| wp_terms              |
| wp_usermeta           |
| wp_users              |
+-----------------------+
12 rows in set (0.00 sec)

3.4.2 Slave database

[root@k8s-deploy nginx]#kubectl exec -it mysql-1 mysql -n web
mysql> use wordpress;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+-----------------------+
| Tables_in_wordpress   |
+-----------------------+
| wp_commentmeta        |
| wp_comments           |
| wp_links              |
| wp_options            |
| wp_postmeta           |
| wp_posts              |
| wp_term_relationships |
| wp_term_taxonomy      |
| wp_termmeta           |
| wp_terms              |
| wp_usermeta           |
| wp_users              |
+-----------------------+
12 rows in set (0.00 sec)

4. Dynamic Microservice Registration and Discovery Based on ZooKeeper

A provider and a consumer are implemented with Dubbo.

Dubbo site: https://cn.dubbo.apache.org/zh/

4.1 Run the provider

4.1.1 Prepare the image

dubbo-demo-provider-2.1.5

# Contents of dubbo-demo-provider
[root@k8s-deploy provider]#tree dubbo-demo-provider-2.1.5
dubbo-demo-provider-2.1.5
├── bin
│   ├── dump.sh
│   ├── restart.sh
│   ├── server.sh
│   ├── start.bat
│   ├── start.sh
│   └── stop.sh
├── conf
│   └── dubbo.properties
└── lib
    ├── cache-api-0.4.jar
    ├── commons-codec-1.4.jar
    ├── commons-logging-1.1.1.jar
    ├── commons-pool-1.5.5.jar
    ├── dubbo-2.1.5.jar
    ├── dubbo-demo-2.1.5.jar
    ├── dubbo-demo-provider-2.1.5.jar
    ├── fastjson-1.1.8.jar
    ├── gmbal-api-only-3.0.0-b023.jar
    ├── grizzly-core-2.1.4.jar
    ├── grizzly-framework-2.1.4.jar
    ├── grizzly-portunif-2.1.4.jar
    ├── grizzly-rcm-2.1.4.jar
    ├── hessian-4.0.7.jar
    ├── hibernate-validator-4.2.0.Final.jar
    ├── httpclient-4.1.2.jar
    ├── httpcore-4.1.2.jar
    ├── javassist-3.15.0-GA.jar
    ├── jedis-2.0.0.jar
    ├── jetty-6.1.26.jar
    ├── jetty-util-6.1.26.jar
    ├── jline-0.9.94.jar
    ├── log4j-1.2.16.jar
    ├── management-api-3.0.0-b012.jar
    ├── mina-core-1.1.7.jar
    ├── netty-3.2.5.Final.jar
    ├── servlet-api-2.5-20081211.jar
    ├── slf4j-api-1.6.2.jar
    ├── spring-2.5.6.SEC03.jar
    ├── validation-api-1.0.0.GA.jar
    └── zookeeper-3.3.3.jar

# Adjust the zookeeper connection address to your environment
[root@k8s-deploy provider]#cat dubbo-demo-provider-2.1.5/conf/dubbo.properties 
......
dubbo.registry.address=zookeeper://zookeeper1.web.svc.cluster.local:2181 | zookeeper://zookeeper2.web.svc.cluster.local:2181 | zookeeper://zookeeper3.web.svc.cluster.local:2181

run_java.sh startup script:

#!/bin/bash
su - nginx -c "/apps/dubbo/provider/bin/start.sh"
tail -f /etc/hosts

Write the Dockerfile:

JDK base image build reference: 5.1.1 building the JDK base image[2]

#Dubbo provider
FROM harbor.chu.net/baseimages/jdk-base:v8.212

RUN yum install file nc -y
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown nginx:nginx /apps -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]

Build the image:

# Build script
[root@k8s-deploy provider]#cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.chu.net/web/dubbo-demo-provider:${TAG} .
sleep 3
docker push harbor.chu.net/web/dubbo-demo-provider:${TAG}

# Build
[root@k8s-deploy provider]#bash build-command.sh v1

4.1.2 Run the provider service

Write the yaml file:

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-provider
  name: web-provider-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-provider
  template:
    metadata:
      labels:
        app: web-provider
    spec:
      containers:
      - name: web-provider-container
        image: harbor.chu.net/web/dubbo-demo-provider:v1 
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-provider
  name: web-provider-spec
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: web-provider

Verify the status:

# Create the provider service
kubectl apply -f provider.yaml

[root@k8s-deploy provider]#kubectl get pod -n web
NAME                                       READY   STATUS    RESTARTS      AGE
web-provider-deployment-7dd6d8dcf7-vxt2g   1/1     Running   0             111s
zookeeper1-56679f8f44-crbmq                1/1     Running   0             36h
zookeeper2-5cd9f77979-slsxd                1/1     Running   0             36h
zookeeper3-5b75f6546b-8bpmv                1/1     Running   0             35h


[root@k8s-deploy provider]#kubectl get svc -n web
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-provider-spec   NodePort    10.100.224.141   <none>        80:61033/TCP                                   87s
zookeeper           ClusterIP   10.100.136.6     <none>        2181/TCP                                       3d19h
zookeeper1          NodePort    10.100.151.76    <none>        2181:32181/TCP,2888:47512/TCP,3888:51623/TCP   3d19h
zookeeper2          NodePort    10.100.161.252   <none>        2181:32182/TCP,2888:60798/TCP,3888:40464/TCP   3d19h
zookeeper3          NodePort    10.100.133.38    <none>        2181:32183/TCP,2888:47700/TCP,3888:43025/TCP   3d19h

4.1.3 Verify provider registration in zookeeper

Connect to zookeeper with the ZooInspector tool to inspect the registration.
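
Without a GUI, the registration can also be listed from inside a zookeeper pod — a hedged sketch, assuming the image ships zkCli.sh on its PATH (the script's location varies between images):

# Providers register under /dubbo/<service>/providers
kubectl -n web exec -it zookeeper1-56679f8f44-crbmq -- \
  zkCli.sh -server 127.0.0.1:2181 ls /dubbo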

4.2 Run the consumer

4.2.1 Prepare the image

dubbo-demo-consumer-2.1.5 files:

[root@k8s-deploy consumer]#tree dubbo-demo-consumer-2.1.5
dubbo-demo-consumer-2.1.5
├── bin
│   ├── dump.sh
│   ├── restart.sh
│   ├── server.sh
│   ├── start.bat
│   ├── start.sh
│   └── stop.sh
├── conf
│   └── dubbo.properties
└── lib
    ├── cache-api-0.4.jar
    ├── commons-codec-1.4.jar
    ├── commons-logging-1.1.1.jar
    ├── commons-pool-1.5.5.jar
    ├── dubbo-2.1.5.jar
    ├── dubbo-demo-2.1.5.jar
    ├── dubbo-demo-consumer-2.1.5.jar
    ├── fastjson-1.1.8.jar
    ├── gmbal-api-only-3.0.0-b023.jar
    ├── grizzly-core-2.1.4.jar
    ├── grizzly-framework-2.1.4.jar
    ├── grizzly-portunif-2.1.4.jar
    ├── grizzly-rcm-2.1.4.jar
    ├── hessian-4.0.7.jar
    ├── hibernate-validator-4.2.0.Final.jar
    ├── httpclient-4.1.2.jar
    ├── httpcore-4.1.2.jar
    ├── javassist-3.15.0-GA.jar
    ├── jedis-2.0.0.jar
    ├── jetty-6.1.26.jar
    ├── jetty-util-6.1.26.jar
    ├── jline-0.9.94.jar
    ├── log4j-1.2.16.jar
    ├── management-api-3.0.0-b012.jar
    ├── mina-core-1.1.7.jar
    ├── netty-3.2.5.Final.jar
    ├── servlet-api-2.5-20081211.jar
    ├── slf4j-api-1.6.2.jar
    ├── spring-2.5.6.SEC03.jar
    ├── validation-api-1.0.0.GA.jar
    └── zookeeper-3.3.3.jar


# Adjust the zookeeper connection address to your environment
[root@k8s-deploy consumer]#cat dubbo-demo-consumer-2.1.5/conf/dubbo.properties 
......
dubbo.registry.address=zookeeper://zookeeper1.web.svc.cluster.local:2181 | zookeeper://zookeeper2.web.svc.cluster.local:2181 | zookeeper://zookeeper3.web.svc.cluster.local:2181

run_java.sh startup script:

#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  &
su - nginx -c "/apps/dubbo/consumer/bin/start.sh"
tail -f /etc/hosts

Write the Dockerfile:

#Dubbo consumer
FROM harbor.chu.net/baseimages/jdk-base:v8.212 

RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer 
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown nginx:nginx /apps -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

Build the image:

# Build script
[root@k8s-deploy consumer]#cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.chu.net/web/dubbo-demo-consumer:${TAG} .
sleep 3
docker push harbor.chu.net/web/dubbo-demo-consumer:${TAG}

# Build
[root@k8s-deploy consumer]#bash build-command.sh v1

4.2.2 Run the consumer service

Write the yaml file:

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-consumer
  template:
    metadata:
      labels:
        app: web-consumer
    spec:
      containers:
      - name: web-consumer-container
        image: harbor.chu.net/web/dubbo-demo-consumer:v1 
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-consumer
  name: web-consumer-server
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: web-consumer

Verify the status:

[root@k8s-deploy consumer]#kubectl apply -f consumer.yaml 

[root@k8s-deploy consumer]#kubectl get pod -n web
NAME                                       READY   STATUS              RESTARTS      AGE
web-consumer-deployment-5948dcb6d6-hp45w   0/1     ContainerCreating   0             15s
web-provider-deployment-7dd6d8dcf7-vxt2g   1/1     Running             0             23m
zookeeper1-56679f8f44-crbmq                1/1     Running             0             36h
zookeeper2-5cd9f77979-slsxd                1/1     Running             0             36h
zookeeper3-5b75f6546b-8bpmv                1/1     Running             0             36h

[root@k8s-deploy consumer]#kubectl get svc -n web
NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-consumer-server   NodePort    10.100.148.88    <none>        80:39194/TCP                                   19s
web-provider-spec     NodePort    10.100.224.141   <none>        80:61033/TCP                                   23m
zookeeper             ClusterIP   10.100.136.6     <none>        2181/TCP                                       3d20h
zookeeper1            NodePort    10.100.151.76    <none>        2181:32181/TCP,2888:47512/TCP,3888:51623/TCP   3d20h
zookeeper2            NodePort    10.100.161.252   <none>        2181:32182/TCP,2888:60798/TCP,3888:40464/TCP   3d20h
zookeeper3            NodePort    10.100.133.38    <none>        2181:32183/TCP,2888:47700/TCP,3888:43025/TCP   3d20h

4.2.3 Verify the consumer in zookeeper
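
As with the provider, check the consumer's subscription with ZooInspector or the zkCli.sh sketch from 4.1.3. The demo consumer also logs the provider's responses; a hypothetical peek, since the exact log path depends on the start.sh inside the package:

# Hypothetical log location under the consumer's install directory
kubectl -n web exec deploy/web-consumer-deployment -- \
  sh -c 'tail -n 20 /apps/dubbo/consumer/logs/*.log'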

4.3 Run dubboadmin

4.3.1 Prepare the image

Prepare the files:

[root@k8s-deploy dubboadmin]#pwd
/data/dockerfile/web/web/dubbo/dubboadmin
[root@k8s-deploy dubboadmin]#ls -l
total 27176
-rw-r--r-- 1 root root      632 Feb  5 01:26 Dockerfile
-rw-r--r-- 1 root root      145 Feb  5 01:26 build-command.sh
-rw-r--r-- 1 root root    22201 Feb  5 01:26 catalina.sh
drwxr-xr-x 8 root root      132 Feb  5 01:26 dubboadmin
-rw-r--r-- 1 root root 27777982 Feb  5 01:26 dubboadmin.war
-rw-r--r-- 1 root root     3436 Feb  5 01:26 logging.properties
-rw-r--r-- 1 root root       99 Feb  5 01:26 run_tomcat.sh
-rw-r--r-- 1 root root     6427 Feb  5 01:26 server.xml

# Adjust the zookeeper address
[root@k8s-deploy dubboadmin]#cat dubboadmin/WEB-INF/dubbo.properties
dubbo.registry.address=zookeeper://zookeeper1.web.svc.cluster.local:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest

# Repackage the war
[root@k8s-deploy dubboadmin]#zip -r dubboadmin.war dubboadmin/*

Write the Dockerfile:

Tomcat base image build reference: 5.1.2 building the tomcat base image[3]

#Dubbo dubboadmin
FROM harbor.chu.net/baseimages/tomcat-base:v8.5.43

RUN yum install unzip -y  
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.war  /data/tomcat/webapps/dubboadmin.war
RUN chmod a+x /apps/tomcat/bin/* && cd /data/tomcat/webapps && unzip dubboadmin.war && rm -rf dubboadmin.war && chown -R nginx:nginx /data /apps

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

Build the image:

# Build script
[root@k8s-deploy dubboadmin]#cat build-command.sh 
#!/bin/bash
TAG=$1
docker build -t harbor.chu.net/web/dubboadmin:${TAG} .
sleep 3
docker push  harbor.chu.net/web/dubboadmin:${TAG}

# Build
[root@k8s-deploy dubboadmin]#bash build-command.sh v1

4.3.2 Run the service

Write the yaml file:

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-dubboadmin
  name: web-dubboadmin-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-dubboadmin
  template:
    metadata:
      labels:
        app: web-dubboadmin
    spec:
      containers:
      - name: web-dubboadmin-container
        image: harbor.chu.net/web/dubboadmin:v1 
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-dubboadmin
  name: web-dubboadmin-service
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30080
  selector:
    app: web-dubboadmin

Verify the status:

# Create
[root@k8s-deploy dubboadmin]#kubectl apply -f dubboadmin.yaml 
deployment.apps/web-dubboadmin-deployment created
service/web-dubboadmin-service created

# List the pods
[root@k8s-deploy dubboadmin]#kubectl get pod -n web
NAME                                         READY   STATUS    RESTARTS      AGE
web-consumer-deployment-5948dcb6d6-hp45w     1/1     Running   0             22m
web-dubboadmin-deployment-7d4fdcc854-w5q5q   1/1     Running   0             65s
web-provider-deployment-7dd6d8dcf7-vxt2g     1/1     Running   0             45m
zookeeper1-56679f8f44-crbmq                  1/1     Running   0             36h
zookeeper2-5cd9f77979-slsxd                  1/1     Running   0             36h
zookeeper3-5b75f6546b-8bpmv                  1/1     Running   0             36h

# List the services
[root@k8s-deploy dubboadmin]#kubectl get svc -n web
NAME                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
web-consumer-server      NodePort    10.100.148.88    <none>        80:39194/TCP                                   21m
web-dubboadmin-service   NodePort    10.100.110.57    <none>        80:30080/TCP                                   9s
web-provider-spec        NodePort    10.100.224.141   <none>        80:61033/TCP                                   44m
zookeeper                ClusterIP   10.100.136.6     <none>        2181/TCP                                       3d20h
zookeeper1               NodePort    10.100.151.76    <none>        2181:32181/TCP,2888:47512/TCP,3888:51623/TCP   3d20h
zookeeper2               NodePort    10.100.161.252   <none>        2181:32182/TCP,2888:60798/TCP,3888:40464/TCP   3d20h
zookeeper3               NodePort    10.100.133.38    <none>        2181:32183/TCP,2888:47700/TCP,3888:43025/TCP   3d20h

4.3.3 Verify the provider and consumer

  • In a browser, visit <node IP>:30080; a load balancer can also be configured in front.

Username: root, password: root

  • Home page

Service governance -- Providers

System administration -- System status


  1. Building the nginx base image

    • Write the Dockerfile
    FROM harbor.chu.net/baseimages/centos-base:7.9.2009
    
    RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
    ADD nginx-1.22.0.tar.gz /usr/local/src/
    RUN cd /usr/local/src/nginx-1.22.0 && ./configure && make && make install && ln -sv /usr/local/nginx/sbin/nginx /usr/sbin/nginx && rm -rf /usr/local/src/nginx-1.22.0.tar.gz
    
    • Build the image and push it to the local harbor registry
    nerdctl build -t harbor.chu.net/baseimages/nginx-base:1.22.0 .
    nerdctl push harbor.chu.net/baseimages/nginx-base:1.22.0
    
    ↩︎
  2. 5.1.1 Building the JDK base image

    • Write the Dockerfile
    #JDK Base Image
    FROM harbor.chu.net/baseimages/centos-base:7.9.2009
    
    ADD jdk-8u212-linux-x64.tar.gz /usr/local/src/
    RUN ln -sv /usr/local/src/jdk1.8.0_212 /usr/local/jdk 
    ADD profile /etc/profile
    
    ENV JAVA_HOME /usr/local/jdk
    ENV JRE_HOME $JAVA_HOME/jre
    ENV CLASSPATH $JAVA_HOME/lib/:$JRE_HOME/lib/
    ENV PATH $PATH:$JAVA_HOME/bin
    
    • Build the image and push it to the harbor registry
    nerdctl build -t harbor.chu.net/baseimages/jdk-base:v8.212 .
    nerdctl push harbor.chu.net/baseimages/jdk-base:v8.212
    

    ↩︎

  3. 5.1.2 Building the tomcat base image

    • Write the Dockerfile
    #Tomcat 8.5.43 base image
    FROM harbor.chu.net/baseimages/jdk-base:v8.212
    
    RUN mkdir -p /apps /data/tomcat/webapps /data/tomcat/logs
    ADD apache-tomcat-8.5.43.tar.gz  /apps
    RUN useradd tomcat -u 2050 && ln -sv /apps/apache-tomcat-8.5.43 /apps/tomcat && chown -R tomcat:tomcat /apps /data
    
    • Build the image and push it to the harbor registry
    nerdctl build -t harbor.chu.net/baseimages/tomcat-base:v8.5.43 .
    nerdctl push harbor.chu.net/baseimages/tomcat-base:v8.5.43
    
    ↩︎
