
Kubernetes (4)


1. Deploying a stateful service with StatefulSet, and running a prometheus node-exporter on every node with DaemonSet

1.1 StatefulSet

https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/statefulset/

  • StatefulSet is designed for stateful services that need cluster deployment and data synchronization between members (MySQL primary/replica, etc.)
  • Pods managed by a StatefulSet have unique, stable names
  • A StatefulSet starts, stops, scales and reclaims its pods in order
  • It is used with a Headless Service (a Service without a cluster IP; requests resolve directly to pod IPs, see the sketch below)
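A quick way to see the headless-service behaviour, assuming the default cluster domain cluster.local and the names used in the manifest below (myserver-myapp-service in the myserver namespace), is to resolve the Service name and a pod hostname from any pod in the cluster:

# the headless Service returns the pod IPs instead of a single cluster IP
nslookup myserver-myapp-service.myserver.svc.cluster.local

# each StatefulSet pod also gets a stable record of the form
# <pod-name>.<service-name>.<namespace>.svc.cluster.local
nslookup myserver-myapp-0.myserver-myapp-service.myserver.svc.cluster.local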

1.1.1 Writing the StatefulSet manifest

apiVersion: apps/v1
kind: StatefulSet 
metadata:
  name: myserver-myapp
  namespace: myserver
spec:
  replicas: 3				# default is 1
  serviceName: "myserver-myapp-service"	# name of the governing Service
  selector:
    matchLabels:
      app: myserver-myapp-frontend	# must match .spec.template.metadata.labels
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend	# must match .spec.selector.matchLabels
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: harbor.chu.net/baseimages/nginx:1.20.0
        ports:
          - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-service
  namespace: myserver
spec:
  clusterIP: None		# the Headless Service named myserver-myapp-service controls the network domain
  ports:
  - name: http
    port: 80
  selector:
    app: myserver-myapp-frontend

Check the result:

kubectl apply -f statefulset.yaml
# StatefulSet info
[root@k8s-deploy case12-Statefulset]#kubectl get sts -n myserver -owide
NAME             READY   AGE     CONTAINERS                IMAGES
myserver-myapp   3/3     2m54s   myserver-myapp-frontend   harbor.chu.net/baseimages/nginx:1.20.0

# Service info
[root@k8s-deploy case12-Statefulset]#kubectl get svc -n myserver -owide
NAME                     TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE     SELECTOR
myserver-myapp-service   ClusterIP   None         <none>        80/TCP    2m47s   app=myserver-myapp-frontend

# Service backend endpoints (pod ip:port)
[root@k8s-deploy case12-Statefulset]#kubectl get ep -n myserver
NAME                     ENDPOINTS                                             AGE
myserver-myapp-service   10.200.107.222:80,10.200.169.161:80,10.200.36.80:80   7m45s

# pod names are unique and numbered in order
[root@k8s-deploy case12-Statefulset]#kubectl get pod -n myserver -owide
NAME               READY   STATUS    RESTARTS   AGE     IP               NODE        NOMINATED NODE   READINESS GATES
myserver-myapp-0   1/1     Running   0          2m58s   10.200.107.222   10.0.0.43   <none>           <none>
myserver-myapp-1   1/1     Running   0          2m56s   10.200.169.161   10.0.0.42   <none>           <none>
myserver-myapp-2   1/1     Running   0          2m54s   10.200.36.80     10.0.0.41   <none>           <none>

# pods are created in order
[root@k8s-deploy yaml]#kubectl describe sts myserver-myapp -n myserver
......
Events:
  Type    Reason            Age   From                    Message
  ----    ------            ----  ----                    -------
  Normal  SuccessfulCreate  2m    statefulset-controller  create Pod myserver-myapp-0 in StatefulSet myserver-myapp successful
  Normal  SuccessfulCreate  2m    statefulset-controller  create Pod myserver-myapp-1 in StatefulSet myserver-myapp successful
  Normal  SuccessfulCreate  2m    statefulset-controller  create Pod myserver-myapp-2 in StatefulSet myserver-myapp successful

1.1.2 Verification

# create a test pod
kubectl run net-test1 --image=alpine sleep 10000 -n myserver

# enter the pod and ping the headless service myserver-myapp-service; the name resolves directly to backend pod IPs
[root@k8s-deploy yaml]#kubectl exec -it net-test1 -n myserver sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
/ # ping myserver-myapp-service
PING myserver-myapp-service (10.200.169.161): 56 data bytes
64 bytes from 10.200.169.161: seq=0 ttl=62 time=0.401 ms
64 bytes from 10.200.169.161: seq=1 ttl=62 time=0.389 ms

/ # ping myserver-myapp-service
PING myserver-myapp-service (10.200.36.80): 56 data bytes
64 bytes from 10.200.36.80: seq=0 ttl=62 time=0.287 ms
64 bytes from 10.200.36.80: seq=1 ttl=62 time=0.290 ms

/ # ping myserver-myapp-service
PING myserver-myapp-service (10.200.107.222): 56 data bytes
64 bytes from 10.200.107.222: seq=0 ttl=63 time=0.057 ms
64 bytes from 10.200.107.222: seq=1 ttl=63 time=0.101 ms

Scaling test

# when scaling down, pods are removed from the highest ordinal first
[root@k8s-deploy yaml]#kubectl scale sts myserver-myapp -n myserver --replicas=2
statefulset.apps/myserver-myapp scaled
[root@k8s-deploy yaml]#kubectl get pod -n myserver
NAME               READY   STATUS    RESTARTS   AGE
myserver-myapp-0   1/1     Running   0          32m
myserver-myapp-1   1/1     Running   0          32m


# when scaling up, pods are added in ascending order
[root@k8s-deploy yaml]#kubectl scale sts myserver-myapp -n myserver --replicas=4
statefulset.apps/myserver-myapp scaled
[root@k8s-deploy yaml]#kubectl get pod -n myserver
NAME               READY   STATUS              RESTARTS   AGE
myserver-myapp-0   1/1     Running             0          32m
myserver-myapp-1   1/1     Running             0          32m
myserver-myapp-2   1/1     Running             0          2s
myserver-myapp-3   0/1     ContainerCreating   0          1s

1.2 DaemonSet

https://kubernetes.io/zh-cn/docs/concepts/workloads/controllers/daemonset/

A DaemonSet runs one copy of the same pod on every node in the cluster. When a new node joins the cluster, the same pod is created on it; when a node is removed, its pod is garbage-collected. Deleting the DaemonSet controller deletes all pods it created.

Some typical uses of a DaemonSet:

  • run a cluster daemon on every node
  • run a log collection daemon on every node
  • run a monitoring daemon on every node

1.2.1 Writing the DaemonSet manifest

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: monitoring 
  labels:
    k8s-app: node-exporter
spec:
  selector:
    matchLabels:
        k8s-app: node-exporter
  template:
    metadata:
      labels:
        k8s-app: node-exporter
    spec:
      tolerations:
        # these tolerations let the DaemonSet run on control-plane nodes;
        # remove them if pods should not run on the control plane
        - effect: NoSchedule
          key: node-role.kubernetes.io/master		# run kubectl get node --show-labels to view node labels
      containers:
      - image: harbor.chu.net/baseimages/prom-node-exporter:v1.3.1 
        imagePullPolicy: IfNotPresent			# pull only when the image is not present locally
        name: prometheus-node-exporter
        ports:
        - containerPort: 9100
          hostPort: 9100
          protocol: TCP
          name: metrics
        volumeMounts:
        - mountPath: /host/proc
          name: proc
        - mountPath: /host/sys
          name: sys
        - mountPath: /host
          name: rootfs
        args:
        - --path.procfs=/host/proc
        - --path.sysfs=/host/sys
        - --path.rootfs=/host
      volumes:
        # mount host directories into the pod
        - name: proc
          hostPath:
            path: /proc
        - name: sys
          hostPath:
            path: /sys
        - name: rootfs
          hostPath:
            path: /
      hostNetwork: true		# use the host network
      hostPID: true
---
apiVersion: v1
kind: Service
metadata:
  annotations:
    prometheus.io/scrape: "true"
  labels:
    k8s-app: node-exporter
  name: node-exporter
  namespace: monitoring 
spec:
  type: NodePort
  ports:
  - name: http
    port: 9100
    nodePort: 30022
    protocol: TCP
  selector:
    k8s-app: node-exporter

Apply the manifests

# create the namespace first
kubectl create ns monitoring
kubectl apply -f daemonset.yaml
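As an optional check (not part of the original notes), kubectl can wait until the DaemonSet has finished rolling out before moving on:

# blocks until the desired number of node-exporter pods are ready
kubectl -n monitoring rollout status daemonset/node-exporter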

1.2.2 Verification

[root@k8s-deploy yaml]#kubectl get daemonset -n monitoring
NAME            DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
node-exporter   6         6         6       6            6           <none>          106s

[root@k8s-deploy yaml]#kubectl get pod -n monitoring -owide
NAME                  READY   STATUS    RESTARTS   AGE    IP          NODE        NOMINATED NODE   READINESS GATES
node-exporter-77lmq   1/1     Running   0          2m2s   10.0.0.13   10.0.0.13   <none>           <none>
node-exporter-jf6xz   1/1     Running   0          2m2s   10.0.0.11   10.0.0.11   <none>           <none>
node-exporter-k6z4z   1/1     Running   0          2m2s   10.0.0.41   10.0.0.41   <none>           <none>
node-exporter-mpfwv   1/1     Running   0          2m2s   10.0.0.43   10.0.0.43   <none>           <none>
node-exporter-xtkfs   1/1     Running   0          2m2s   10.0.0.12   10.0.0.12   <none>           <none>
node-exporter-zzszd   1/1     Running   0          2m2s   10.0.0.42   10.0.0.42   <none>           <none>

Each node runs one node-exporter pod.

Access from a browser

[root@k8s-deploy yaml]#kubectl get svc -n monitoring -owide
NAME            TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE     SELECTOR
node-exporter   NodePort   10.100.70.108   <none>        9100:30022/TCP   4m19s   k8s-app=node-exporter

2. Common pod statuses and failure causes

https://kubernetes.io/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/

Pod scheduling flow

Common pod statuses and their causes are listed below (a short set of diagnostic commands follows the list):

  • Unschedulable: the pod cannot be scheduled; kube-scheduler did not find a suitable node
  • PodScheduled: the pod is in the middle of scheduling; kube-scheduler has started but has not yet assigned it to a node. Once a suitable node is selected, etcd is updated and the pod is bound to that node
  • Pending: the pod is being created but not all of its containers have been created yet; check whether the storage the pod depends on can be mounted, permissions, etc.
  • Failed: a container in the pod failed to start, so the pod is not working properly
  • Unknown: the current state of the pod cannot be obtained, usually because of a communication error with the node the pod runs on
  • Initialized: all init containers in the pod have completed
  • ImagePullBackOff: the node hosting the pod failed to pull the image
  • Running: the pod's containers have been created and started
  • Ready: the containers in the pod are ready to serve requests
  • Error: an error occurred while starting the pod
  • NodeLost: the node hosting the pod is unreachable
  • Waiting: the pod is waiting to start
  • Terminating: the pod is being destroyed
  • CrashLoopBackOff: the pod started and then exited abnormally; kubelet is restarting it
  • InvalidImageName: the node cannot parse the image name, so the image cannot be pulled
  • ImageInspectError: the image cannot be inspected, usually because it is incomplete or corrupt
  • ErrImageNeverPull: the pull policy forbids pulling the image (imagePullPolicy: Never), the registry project is private, etc.
  • RegistryUnavailable: the image registry is unreachable, e.g. network problems or harbor is down
  • ErrImagePull: the image pull failed, e.g. a timeout or the download was terminated
  • CreateContainerConfigError: the container configuration used by kubelet cannot be created
  • CreateContainerError: the container could not be created
  • RunContainerError: the container failed to run, e.g. no PID 1 daemon was started inside the container
  • ContainersNotInitialized: the pod's init containers have not finished
  • ContainersNotReady: the pod's containers are not ready
  • ContainerCreating: the pod is being created
  • PodInitializing: the pod is initializing
  • DockerDaemonNotReady: the docker service on the node is not running
  • NetworkPluginNotReady: the network plugin has not started
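When a pod is stuck in one of the abnormal states above, the usual starting points (a generic sketch, reusing the myserver namespace from the earlier examples) are the event stream and the container logs:

# events recorded for the pod (scheduling, image pulls, probe failures, ...)
kubectl -n myserver describe pod <pod-name>

# recent events in the namespace, sorted by time
kubectl -n myserver get events --sort-by=.lastTimestamp

# logs of the current container, and of the previous instance after a crash
kubectl -n myserver logs <pod-name>
kubectl -n myserver logs <pod-name> --previous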

3. Using startupProbe, livenessProbe and readinessProbe to monitor pod state

Pod lifecycle

3.1 Probe overview

https://kubernetes.io/zh-cn/docs/concepts/workloads/pods/pod-lifecycle/#container-probes

https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/

A probe is a periodic diagnostic performed by the kubelet on a container to keep the pod in a healthy running state. To perform a diagnostic, the kubelet can either execute code inside the container or make a network request.

3.1.1 Check mechanisms

There are four ways a probe can check a container (a short sketch follows this list).

  1. exec

Executes a specified command inside the container. The diagnostic is considered successful if the command exits with status code 0.

  2. grpc

Performs a remote procedure call using gRPC. The target should implement gRPC health checks. The diagnostic is considered successful if the response status is "SERVING". gRPC probes are an Alpha feature and can only be used when the "GRPCContainerProbe" feature gate is enabled.

  3. httpGet

Performs an HTTP GET request against the container's IP address on a specified port and path. The diagnostic is considered successful if the response status code is greater than or equal to 200 and less than 400.

  4. tcpSocket

Performs a TCP check against the container's IP address on a specified port. The diagnostic is considered successful if the port is open.
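For illustration only (the manifests later in this section all use httpGet), the exec and tcpSocket mechanisms are declared like this inside a container spec; the command and port here are placeholders:

        livenessProbe:
          exec:
            command: ["cat", "/tmp/healthy"]	# success when the command exits with code 0
          periodSeconds: 5
        readinessProbe:
          tcpSocket:
            port: 80				# success when a TCP connection to port 80 succeeds
          periodSeconds: 5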

3.1.2 Probe outcomes

Each probe returns one of three results:

  1. Success: the container passed the diagnostic.

  2. Failure: the container failed the diagnostic.

  3. Unknown: the diagnostic itself failed, so no action is taken.

3.1.3 Probe types

For a running container, the kubelet can optionally perform the following three probes and react to the result:

  1. livenessProbe (liveness probe)

Indicates whether the container is running. If the liveness probe fails, the kubelet kills the container and the container is handled according to its restart policy. If no liveness probe is provided, the default state is Success.

  2. readinessProbe (readiness probe)

Indicates whether the container is ready to serve requests. If the readiness probe fails, the endpoints controller removes the pod's IP address from the endpoints of all Services that match the pod. The readiness state before the initial delay defaults to Failure. If no readiness probe is provided, the default state is Success.

  3. startupProbe (startup probe)

Indicates whether the application in the container has started. If a startup probe is provided, all other probes are disabled until it succeeds. If the startup probe fails, the kubelet kills the container and the container is restarted according to its restart policy. If no startup probe is provided, the default state is Success.

3.2 Configuring probes

https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/

Probe configuration fields (a timing sketch follows this list):

  • initialDelaySeconds: number of seconds to wait after the container has started before the startup, liveness or readiness probe is initiated. Default 0, minimum 0.
  • periodSeconds: how often (in seconds) to perform the probe. Default 10, minimum 1.
  • timeoutSeconds: number of seconds after which the probe times out. Default 1, minimum 1.
  • successThreshold: minimum number of consecutive successes after a failure for the probe to be considered successful. Default 1; must be 1 for liveness and startup probes. Minimum 1.
  • failureThreshold: after failureThreshold consecutive failures, Kubernetes considers the overall check to have failed: the container is not ready, not healthy, not live. For startup and liveness probes, once failureThreshold failures have occurred, Kubernetes treats the container as unhealthy and triggers a restart of that container, honouring the container's terminationGracePeriodSeconds. For a failed readiness probe, the kubelet keeps the container running and keeps probing, but sets the pod's Ready condition to false.
  • terminationGracePeriodSeconds: grace period the kubelet waits between triggering a shutdown of the failed container and forcing the container runtime to stop it. Defaults to the pod-level terminationGracePeriodSeconds (30 seconds if unset), minimum 1.
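A rough worked example of how the timing fields combine, using the same values as the manifests below:

        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 5	# first probe 5s after the container starts
          periodSeconds: 3		# probe every 3s
          timeoutSeconds: 5		# each probe may take up to 5s before counting as failed
          failureThreshold: 3		# 3 consecutive failures trigger the action
        # after the initial delay, a dead endpoint is detected in roughly
        # failureThreshold * periodSeconds = 9s, plus up to timeoutSeconds per probe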

3.2.1 livenessProbe

  1. Write the livenessProbe manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: 
      app: myserver-myapp-frontend-label
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: harbor.chu.net/baseimages/nginx:1.20.0
        ports:
        - containerPort: 80
        #readinessProbe:
        livenessProbe:			# liveness probe
          httpGet:			# check type: http
            path: /index.html		# path on the http service to request
            port: 80			# container port number or name; gRPC probes cannot use named ports or custom hosts
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30023
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label 
  2. Verification
  • Check status
[root@k8s-deploy case3-Probe]#kubectl get svc -n myserver
NAME                              TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
myserver-myapp-frontend-service   NodePort   10.100.120.93   <none>        81:30023/TCP   95s


# pod status is Running
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                  READY   STATUS    RESTARTS        AGE
myserver-myapp-frontend-deployment-777fbb9c56-wnf49   1/1     Running   0               8s
  • Enter the container and delete index.html
# delete the index.html file
[root@k8s-deploy yaml]#kubectl exec -it myserver-myapp-frontend-deployment-777fbb9c56-wnf49 -n myserver bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@myserver-myapp-frontend-deployment-777fbb9c56-wnf49:/# rm /usr/share/nginx/html/index.html 
# the liveness probe fails, kubelet kills the container and the current session is terminated
root@myserver-myapp-frontend-deployment-777fbb9c56-wnf49:/# command terminated with exit code 137

# the pod restarts once automatically and returns to normal
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                  READY   STATUS    RESTARTS        AGE
myserver-myapp-frontend-deployment-777fbb9c56-wnf49   1/1     Running   1 (4s ago)      2m13s

# after the restart the container is back in its initial state and the deleted index.html is present again
[root@k8s-deploy yaml]#kubectl exec -it myserver-myapp-frontend-deployment-777fbb9c56-wnf49 -n myserver bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@myserver-myapp-frontend-deployment-777fbb9c56-wnf49:/# ls /usr/share/nginx/html/index.html 
/usr/share/nginx/html/index.html

When the liveness probe fails, the container is restarted automatically and the pod returns to its initial state.

3.2.2 readinessProbe

  1. Write the readinessProbe manifest
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels: 
      app: myserver-myapp-frontend-label
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      containers:
      - name: myserver-myapp-frontend-label
        image: harbor.chu.net/baseimages/nginx:1.20.0
        ports:
        - containerPort: 80
        readinessProbe:			# readiness probe
        #livenessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30023
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label
  2. Verification
  • Check status

The Service's backend pod ip:port is 10.200.169.165:80

[root@k8s-deploy case3-Probe]#kubectl get svc -n myserver
NAME                              TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
myserver-myapp-frontend-service   NodePort   10.100.198.37   <none>        81:30023/TCP   7s

# endpoint addresses
[root@k8s-deploy case3-Probe]#kubectl get ep -n myserver
NAME                              ENDPOINTS           AGE
myserver-myapp-frontend-service   10.200.169.165:80   14s
  • Enter the container and delete index.html
# enter the container and delete the index.html file used by the http check
[root@k8s-deploy yaml]#kubectl exec -it myserver-myapp-frontend-deployment-f68bdc86d-rcgbp  -n myserver bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@myserver-myapp-frontend-deployment-f68bdc86d-rcgbp:/# mv /usr/share/nginx/html/index.html /tmp

# the endpoint list is now empty
[root@k8s-deploy case3-Probe]#kubectl get ep -n myserver
NAME                              ENDPOINTS   AGE
myserver-myapp-frontend-service               59s

# the number of ready containers in the pod is 0
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                 READY   STATUS    RESTARTS      AGE
myserver-myapp-frontend-deployment-f68bdc86d-rcgbp   0/1     Running   0             65s

# the web page is no longer reachable
[root@k8s-deploy case3-Probe]#curl 10.0.0.42:30023
curl: (7) Failed to connect to 10.0.0.42 port 30023: Connection refused

When the readiness probe fails, the pod's address is removed from the Service endpoints automatically and the web service becomes unavailable.

  • Restore index.html in the container
root@myserver-myapp-frontend-deployment-f68bdc86d-rcgbp:/# echo 'readinessProbe test' >> /usr/share/nginx/html/index.html

# the pod becomes ready again
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                 READY   STATUS    RESTARTS      AGE
myserver-myapp-frontend-deployment-f68bdc86d-rcgbp   1/1     Running   0             111s

# the endpoint address is back
[root@k8s-deploy case3-Probe]#kubectl get ep -n myserver
NAME                              ENDPOINTS           AGE
myserver-myapp-frontend-service   10.200.169.165:80   113s

# the web page is reachable again
[root@k8s-deploy case3-Probe]#curl 10.0.0.42:30023
readinessProbe test

Once the readiness probe succeeds again, the pod's address is added back to the Service endpoints and the page is reachable again.

3.2.3 startupProbe

  1. Create the yaml file
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
  namespace: myserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend-label
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend-label
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: myserver-myapp-frontend-label
        image: nginx:1.20.2
        ports:
        - containerPort: 80
        startupProbe:
          httpGet:
            path: /my-index.html	# the startup probe checks my-index.html
            port: 80
          initialDelaySeconds: 5	# delay the first probe by 5s
          failureThreshold: 3		# consecutive failures before the probe is considered failed
          periodSeconds: 3		# probe interval
        readinessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend-service
  namespace: myserver
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30023
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend-label

With startup, liveness and readiness probes all configured, the liveness and readiness probes only start checking after the startup probe has succeeded (there is no ordering between the liveness and readiness probes themselves).

  2. Verification
  • Check status
[root@k8s-deploy case3-Probe]#kubectl get svc -n myserver
NAME                              TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
myserver-myapp-frontend-service   NodePort   10.100.115.71   <none>        81:30023/TCP   52s

# the pod has 0 ready containers
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                 READY   STATUS    RESTARTS      AGE
myserver-myapp-frontend-deployment-f9774f8b8-m85br   0/1     Running   2 (3s ago)    54s

# the Service has no backend addresses
[root@k8s-deploy case3-Probe]#kubectl get ep -n myserver
NAME                              ENDPOINTS   AGE
myserver-myapp-frontend-service   <none>      59s

# the startup probe is failing
[root@k8s-deploy case3-Probe]#kubectl describe pod myserver-myapp-frontend-deployment-f9774f8b8-m85br -n myserver
...
Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  23s                default-scheduler  Successfully assigned myserver/myserver-myapp-frontend-deployment-7cfc55c668-jskcs to 10.0.0.42
  Normal   Pulled     11s (x2 over 22s)  kubelet            Container image "nginx:1.20.2" already present on machine
  Normal   Created    11s (x2 over 22s)  kubelet            Created container myserver-myapp-frontend-label
  Normal   Started    11s (x2 over 22s)  kubelet            Started container myserver-myapp-frontend-label
  Normal   Killing    11s                kubelet            Container myserver-myapp-frontend-label failed startup probe, will be restarted
  Warning  Unhealthy  2s (x5 over 17s)   kubelet            Startup probe failed: HTTP probe failed with statuscode: 404

The startup probe fails to fetch my-index.html, so the pod never becomes ready.

  • Add the file
# add the my-index.html file
[root@k8s-deploy yaml]#kubectl exec -it myserver-myapp-frontend-deployment-f9774f8b8-lb2jb -n myserver bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@myserver-myapp-frontend-deployment-f9774f8b8-lb2jb:/# cp /usr/share/nginx/html/index.html /usr/share/nginx/html/my-index.html

# ready count is 1
[root@k8s-deploy case3-Probe]#kubectl get pod -n myserver
NAME                                                 READY   STATUS    RESTARTS      AGE
myserver-myapp-frontend-deployment-f9774f8b8-lb2jb   1/1     Running   0             36s
net-test1                                            1/1     Running   2 (30m ago)   6h4m

[root@k8s-deploy case3-Probe]#kubectl get ep -n myserver
NAME                              ENDPOINTS           AGE
myserver-myapp-frontend-service   10.200.169.166:80   40s

# fetch the http response code
[root@k8s-deploy yaml]#curl -Is 10.0.0.42:30023/my-index.html|grep HTTP
HTTP/1.1 200 OK

After entering the container promptly and adding my-index.html, the startup probe succeeds, the pod becomes ready, and the liveness and readiness probes run normally.

4. Building container images with nerdctl + buildkitd

4.1 Deploying buildkitd

https://github.com/moby/buildkit

BuildKit components

buildkitd (server): supports runc and containerd as the image build environment; the default is runc, which can be switched to containerd
buildctl (client): parses the Dockerfile and sends the build request to the buildkitd server
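For reference, buildctl can also drive a build directly; the following is a sketch based on the moby/buildkit README (this write-up uses nerdctl build as the client instead, and the image name here is only an example):

buildctl build \
    --frontend=dockerfile.v0 \
    --local context=. \
    --local dockerfile=. \
    --output type=image,name=harbor.chu.net/baseimages/demo:v1,push=true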

Host

k8s-master2 10.0.0.12 buildkitd server (image build server)

Install buildkitd

 cd /usr/local/src
# download and unpack
wget https://github.com/moby/buildkit/releases/download/v0.11.0/buildkit-v0.11.0.linux-amd64.tar.gz
tar xvf buildkit-v0.11.0.linux-amd64.tar.gz
mv bin/buildctl bin/buildkitd /usr/local/bin/

# prepare the buildkit socket unit
cat >> /lib/systemd/system/buildkit.socket <<EOF
[Unit]
Description=BuildKit
Documentation=https://github.com/moby/buildkit

[Socket]
ListenStream=%t/buildkit/buildkitd.sock

[Install]
WantedBy=sockets.target
EOF

# prepare the buildkit service unit
cat >> /lib/systemd/system/buildkit.service <<EOF
[Unit]
Description=BuildKit
Requires=buildkit.socket
After=buildkit.socket
Documentation=https://github.com/moby/buildkit

[Service]
ExecStart=/usr/local/bin/buildkitd --oci-worker=false --containerd-worker=true

[Install]
WantedBy=multi-user.target
EOF

# start the service
systemctl daemon-reload
systemctl enable buildkit
systemctl start buildkit

Check the buildkit service status

[root@k8s-master2 src]#systemctl is-active buildkit.service 
active
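Optionally (not in the original notes), buildctl can confirm that the containerd worker is registered:

# lists the active BuildKit workers; a containerd worker should appear here
buildctl debug workers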

4.2 Deploying nerdctl

# install nerdctl
wget https://github.com/containerd/nerdctl/releases/download/v1.0.0/nerdctl-1.0.0-linux-amd64.tar.gz
tar -xvf nerdctl-1.0.0-linux-amd64.tar.gz
mv nerdctl /usr/bin/

# install the CNI plugins
wget https://github.com/containernetworking/plugins/releases/download/v1.1.0/cni-plugins-linux-amd64-v1.1.0.tgz
mkdir -p /opt/cni/bin
tar -xvf cni-plugins-linux-amd64-v1.1.0.tgz -C /opt/cni/bin

# configure nerdctl command completion
source <(nerdctl completion bash)
echo "source <(nerdctl completion bash)" >> ~/.bashrc

Test pushing an image

# log in to the harbor registry; add the --insecure-registry flag
nerdctl login --insecure-registry harbor.chu.net
# pull an image
nerdctl pull centos:7.9.2009
# tag it
nerdctl tag centos:7.9.2009 harbor.chu.net/baseimages/centos:7.9.2009
# push the image to harbor; add the --insecure-registry flag
nerdctl --insecure-registry push harbor.chu.net/baseimages/centos:7.9.2009

4.3 Distributing the harbor certificates

# on the image build server, create a directory named after the harbor domain
[root@k8s-master2 ~]#mkdir -p /etc/containerd/certs.d/harbor.chu.net

# distribute the harbor certificates
## convert the format
[root@harbor1 harbor]#cd /apps/harbor/certs/
[root@harbor1 certs]#openssl x509 -inform PEM -in chu.net.crt -out chu.net.cert 
[root@harbor1 certs]#ls
ca.crt  ca.key  ca.srl  chu.net.cert  chu.net.crt  chu.net.csr  chu.net.key  v3.ext

## copy the certificates to the image build server
[root@harbor1 certs]# scp ca.crt chu.net.cert chu.net.key 10.0.0.12:/etc/containerd/certs.d/harbor.chu.net/

## check the certificates on the build server
[root@k8s-master2 ~]#ls /etc/containerd/certs.d/harbor.chu.net/
ca.crt  chu.net.cert  chu.net.key

Log in to harbor

[root@k8s-master2 ~]#nerdctl login harbor.chu.net
Enter Username: admin
Enter Password: 
WARNING: Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded

4.4 Building an image

4.4.1 Write the Dockerfile

FROM ubuntu:22.04
ADD sources.list /etc/apt/sources.list
RUN apt update && apt install -y iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip make

ADD nginx-1.22.0.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.22.0 && ./configure --prefix=/apps/nginx && make && make install  && ln -s /apps/nginx/sbin/nginx /usr/bin
RUN groupadd  -g 2088 nginx && useradd  -g nginx -s /usr/sbin/nologin -u 2088 nginx && chown -R nginx:nginx /apps/nginx
ADD nginx.conf /apps/nginx/conf/
ADD frontend.tar.gz /apps/nginx/html/


EXPOSE 80 443
#ENTRYPOINT ["nginx"]
CMD ["nginx","-g","daemon off;"]

4.4.2 Prepare the files

[root@k8s-master2 ubuntu]#ls
Dockerfile  frontend.tar.gz  nginx-1.22.0.tar.gz  nginx.conf  sources.list
  • nginx source code
# download nginx
wget http://nginx.org/download/nginx-1.22.0.tar.gz
  • nginx.conf configuration file
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    upstream tomcat {
        server 10.0.0.101:8080;
        server 10.0.0.102:8080;
    }
    server {
        listen       80;
        server_name  localhost;
        location / {
            root   html;
            index  index.html index.htm;
        }
	location /myapp {
           proxy_pass http://tomcat;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

  • sources.list (Tsinghua mirror)
# source repositories are commented out by default to speed up apt update; uncomment them if needed
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-updates main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-updates main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-backports main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-backports main restricted universe multiverse
deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-security main restricted universe multiverse
# deb-src https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-security main restricted universe multiverse

# pre-release repositories, not recommended
# deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-proposed main restricted universe multiverse
# deb-src http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ jammy-proposed main restricted universe multiverse
  • frontend.tar.gz (web content)
[root@k8s-master2 ubuntu]#tar tf frontend.tar.gz 
./
./images/
./images/1.jpg
./index.html

4.4.3 Build the image locally

# build the image locally
nerdctl build -t harbor.chu.net/baseimages/nginx:1.20.2 .

Build output

[root@k8s-master2 ubuntu]#nerdctl build -t harbor.chu.net/baseimages/nginx:1.20.2 .
[+] Building 156.6s (12/13)                                                                                                                                                                                 
[+] Building 156.7s (13/13) FINISHED                                                                                                                                                                        
 => [internal] load build definition from Dockerfile                                                                                                                                                     0.0s
 => => transferring dockerfile: 885B                                                                                                                                                                     0.0s
 => [internal] load .dockerignore                                                                                                                                                                        0.0s
 => => transferring context: 2B                                                                                                                                                                          0.0s
 => [internal] load metadata for docker.io/library/ubuntu:22.04                                                                                                                                          7.4s
 => [1/8] FROM docker.io/library/ubuntu:22.04@sha256:27cb6e6ccef575a4698b66f5de06c7ecd61589132d5a91d098f7f3f9285415a9                                                                                   16.3s
 => => resolve docker.io/library/ubuntu:22.04@sha256:27cb6e6ccef575a4698b66f5de06c7ecd61589132d5a91d098f7f3f9285415a9                                                                                    0.0s
 => => sha256:6e3729cf69e0ce2de9e779575a1fec8b7fb5efdfa822829290ab6d5d1bc3e797 30.43MB / 30.43MB                                                                                                        14.7s
 => => extracting sha256:6e3729cf69e0ce2de9e779575a1fec8b7fb5efdfa822829290ab6d5d1bc3e797                                                                                                                1.5s
 => [internal] load build context                                                                                                                                                                        0.1s
 => => transferring context: 1.12MB                                                                                                                                                                      0.0s
 => [2/8] ADD sources.list /etc/apt/sources.list                                                                                                                                                         0.2s
 => [3/8] RUN apt update && apt  install -y iproute2  ntpdate  tcpdump telnet traceroute nfs-kernel-server nfs-common  lrzsz tree  openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump  64.4s 
 => [4/8] ADD nginx-1.22.0.tar.gz /usr/local/src/                                                                                                                                                        0.3s 
 => [5/8] RUN cd /usr/local/src/nginx-1.22.0 && ./configure --prefix=/apps/nginx && make && make install  && ln -sv /apps/nginx/sbin/nginx /usr/bin                                                     32.3s 
 => [6/8] RUN groupadd  -g 2088 nginx && useradd  -g nginx -s /usr/sbin/nologin -u 2088 nginx && chown -R nginx.nginx /apps/nginx                                                                        0.3s 
 => [7/8] ADD nginx.conf /apps/nginx/conf/                                                                                                                                                               0.1s 
 => [8/8] ADD frontend.tar.gz /apps/nginx/html/                                                                                                                                                          0.1s 
 => exporting to docker image format                                                                                                                                                                    35.3s 
 => => exporting layers                                                                                                                                                                                 23.0s 
 => => exporting manifest sha256:1fd16223228c63b12abf40319de112e5bd5cad4f713466109f75b581ae9a9ee5                                                                                                        0.0s
 => => exporting config sha256:bc338d3676045d6e665338bff41a59be13abe417a62a4b4d7e360659ea7b2277                                                                                                          0.0s
 => => sending tarball                                                                                                                                                                                  12.3s
Loaded image: harbor.chu.net/baseimages/nginx:1.20.2

4.4.4 Test the image

# create a container; make sure the CNI plugins are installed
[root@k8s-master2 bin]#nerdctl run -d -p 80:80 harbor.chu.net/baseimages/nginx:1.20.2
a3e0cfc5118bdc7aa067ff9402d2db36cd409a750bd4230388c80ba2e8fc9d12

[root@k8s-master2 bin]#nerdctl ps
CONTAINER ID    IMAGE                                     COMMAND                   CREATED           STATUS    PORTS                 NAMES
a3e0cfc5118b    harbor.chu.net/baseimages/nginx:1.20.2    "nginx -g daemon off;"    16 seconds ago    Up        0.0.0.0:80->80/tcp    nginx-a3e0c

Access from a browser

4.4.5 Push the image

# push to the harbor registry
nerdctl push harbor.chu.net/baseimages/nginx:1.20.2

Push output

[root@k8s-master2 ~]#nerdctl push harbor.chu.net/baseimages/nginx:1.20.2
INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest.v2+json, sha256:1fd16223228c63b12abf40319de112e5bd5cad4f713466109f75b581ae9a9ee5) 
manifest-sha256:1fd16223228c63b12abf40319de112e5bd5cad4f713466109f75b581ae9a9ee5: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:bc338d3676045d6e665338bff41a59be13abe417a62a4b4d7e360659ea7b2277:   done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 8.5 s                                                                    total:  5.2 Ki (623.0 B/s)  

Check the harbor registry

4.5 nginx as an https proxy for harbor

Harbor's self-signed certificate is not trusted by clients. The fix used here is to move TLS termination from harbor to the load balancer: clients speak https to the load balancer, and the load balancer speaks plain http to harbor.


Hosts

harbor1 10.0.0.101 VIP 10.0.0.100
harbor2 10.0.0.102 VIP 10.0.0.100

4.5.1 Switch harbor to http

[root@harbor1 ~]#cd /apps/harbor/
# comment out the https configuration
[root@harbor1 harbor]#vim harbor.yml
...
#https:
#  # https port for harbor, default is 443
#  port: 443
#  # The path of cert and key files for nginx
#  certificate: /apps/harbor/certs/chu.net.crt
#  private_key: /apps/harbor/certs/chu.net.key

# regenerate the configuration and restart docker-compose
[root@harbor harbor]#docker-compose stop	# or docker-compose down -v
[root@harbor harbor]#./prepare 
[root@harbor harbor]#docker-compose start	# or docker-compose up -d

4.5.2 nginx terminates https

For the keepalived setup see: 7. keepalived with haproxy for high availability[^1]

  1. Install nginx
mkdir -p /apps/nginx
apt install -y make
cd /usr/local/src/
wget http://nginx.org/download/nginx-1.22.1.tar.gz
tar -xvf nginx-1.22.1.tar.gz
cd nginx-1.22.1
./configure --prefix=/apps/nginx \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_realip_module \
--with-http_stub_status_module \
--with-http_gzip_static_module \
--with-pcre \
--with-stream \
--with-stream_ssl_module \
--with-stream_realip_module

make && make install
  2. Prepare the certificates
# create the certificate directory and copy the certificates from the harbor server
mkdir -p /apps/nginx/certs
scp 10.0.0.101:/apps/harbor/certs/chu.net.crt 10.0.0.101:/apps/harbor/certs/chu.net.key /apps/nginx/certs/
  3. Configure https
[root@k8s-ha1 nginx]#cat conf/nginx.conf|egrep -v '^\s*#|^$'
worker_processes  auto;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    client_max_body_size 1000m;
    upstream harbor {
        ip_hash;
        server 10.0.0.101;
        server 10.0.0.102;
    }
    server {
        listen       10.0.0.100:80;
        listen       10.0.0.100:443 ssl;
        ssl_certificate      /apps/nginx/certs/chu.net.crt;
        ssl_certificate_key  /apps/nginx/certs/chu.net.key;
        ssl_session_cache    shared:SSL:20m;
        ssl_session_timeout  10m;
        server_name  harbor.chu.net;

        location / {
            proxy_pass http://harbor;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

  4. Configure the systemd service
cat > /usr/lib/systemd/system/nginx.service <<EOF
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
Type=forking
PIDFile=/apps/nginx/logs/nginx.pid
ExecStartPre=/bin/rm -f /apps/nginx/logs/nginx.pid
ExecStartPre=/apps/nginx/sbin/nginx -t
ExecStart=/apps/nginx/sbin/nginx -c /apps/nginx/conf/nginx.conf
ExecReload=/bin/kill -s HUP \$MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true

[Install]
WantedBy=multi-user.target
EOF

Start the service

systemctl daemon-reload
systemctl enable --now nginx

4.5.3 Access from a browser

http

https

4.5.4 Configure buildkitd

Allow http

mkdir -p /etc/buildkit
cat >>/etc/buildkit/buildkitd.toml <<EOF
[registry."harbor.chu.net"]
  http = true
  insecure = true
EOF

# restart the service
systemctl restart buildkit.service

4.5.5 Configure nerdctl

Set the default namespace and allow http registries

mkdir -p /etc/nerdctl
cat >>/etc/nerdctl/nerdctl.toml <<EOF
namespace = "k8s.io"
debug = false
debug_full = false
insecure_registry = true
EOF

4.5.6 Verify with a custom image build

# pull the base image
nerdctl pull harbor.chu.net/baseimages/nginx:1.20.2

# write the Dockerfile
cat >Dockerfile <<EOF 
FROM harbor.chu.net/baseimages/nginx:1.20.2

CMD ["tail","-f","/etc/hosts"]
EOF

# build the image
nerdctl build -t harbor.chu.net/test/nginx:v11 .

Build output

[root@k8s-master2 dockerfile]#nerdctl build -t harbor.chu.net/test/nginx:v11 .
[+] Building 12.4s (5/5)                                                                                                                                                                                  
[+] Building 12.5s (5/5) FINISHED                                                                                                                                                                         
 => [internal] load .dockerignore                                                                                                                                                                        0.0s
 => => transferring context: 2B                                                                                                                                                                          0.0s
 => [internal] load build definition from Dockerfile                                                                                                                                                     0.0s
 => => transferring dockerfile: 113B                                                                                                                                                                     0.0s
 => [internal] load metadata for harbor.chu.net/baseimages/nginx:1.20.2                                                                                                                                  0.0s
 => CACHED [1/1] FROM harbor.chu.net/baseimages/nginx:1.20.2@sha256:1fd16223228c63b12abf40319de112e5bd5cad4f713466109f75b581ae9a9ee5                                                                     0.0s
 => => resolve harbor.chu.net/baseimages/nginx:1.20.2@sha256:1fd16223228c63b12abf40319de112e5bd5cad4f713466109f75b581ae9a9ee5                                                                            0.0s
 => exporting to docker image format                                                                                                                                                                    12.3s
 => => exporting layers                                                                                                                                                                                  0.0s
 => => exporting manifest sha256:0ed650b501158236ab9a4c4563d697bda491790a49b7da3632496ce159ac108c                                                                                                        0.0s
 => => exporting config sha256:652aabc714544426d2b4f2c55b04e181a5533c27337393c80a7b37a53100c761                                                                                                          0.0s
 => => sending tarball                                                                                                                                                                                  12.3s
Loaded image: harbor.chu.net/test/nginx:v11

Push the image to the harbor registry

[root@k8s-master2 dockerfile]#nerdctl push harbor.chu.net/test/nginx:v11
INFO[0000] pushing as a reduced-platform image (application/vnd.docker.distribution.manifest.v2+json, sha256:d730a1e993a94b80a861e4e10990cdf11a14bf8c2a89d41ef023898da0a1f197) 
WARN[0000] skipping verifying HTTPS certs for "harbor.chu.net" 
manifest-sha256:d730a1e993a94b80a861e4e10990cdf11a14bf8c2a89d41ef023898da0a1f197: done           |++++++++++++++++++++++++++++++++++++++| 
config-sha256:c0c6da3dad5aca02318f6d335a0f707655b31c7b5380b9e9f0d32d2d6f46cadf:   done           |++++++++++++++++++++++++++++++++++++++| 
elapsed: 1.8 s                                                                    total:  7.9 Ki (4.4 KiB/s)  

Check in the harbor web UI

5. Running Nginx and Java services from custom images, with NFS-backed static/dynamic separation

Service flow

Image hierarchy plan

Build the OS base image

  • Write the Dockerfile
# custom CentOS base image
FROM centos:7.9.2009
MAINTAINER Areke [email protected]

# filebeat for log collection: https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.12.1-x86_64.rpm
ADD filebeat-7.12.1-x86_64.rpm /tmp
RUN yum install -y /tmp/filebeat-7.12.1-x86_64.rpm vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop &&  rm -rf /etc/localtime /tmp/filebeat-7.12.1-x86_64.rpm && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime  && useradd nginx -u 2088
  • Build the image and push it to the local harbor registry
nerdctl build -t harbor.chu.net/baseimages/centos-base:7.9.2009 .
nerdctl push harbor.chu.net/baseimages/centos-base:7.9.2009

5.1 Tomcat

5.1.1 Build the JDK base image

  • Write the Dockerfile
#JDK Base Image
FROM harbor.chu.net/baseimages/centos-base:7.9.2009

ADD jdk-8u212-linux-x64.tar.gz /usr/local/src/
RUN ln -sv /usr/local/src/jdk1.8.0_212 /usr/local/jdk 
ADD profile /etc/profile

ENV JAVA_HOME /usr/local/jdk
ENV JRE_HOME $JAVA_HOME/jre
ENV CLASSPATH $JAVA_HOME/lib/:$JRE_HOME/lib/
ENV PATH $PATH:$JAVA_HOME/bin
  • Build the image and push it to the harbor registry
nerdctl build -t harbor.chu.net/baseimages/jdk-base:v8.212 .
nerdctl push harbor.chu.net/baseimages/jdk-base:v8.212

5.1.2 Build the tomcat base image

  • Write the Dockerfile
# Tomcat 8.5.43 base image
FROM harbor.chu.net/baseimages/jdk-base:v8.212

RUN mkdir -p /apps /data/tomcat/webapps /data/tomcat/logs
ADD apache-tomcat-8.5.43.tar.gz  /apps
RUN useradd tomcat -u 2050 && ln -sv /apps/apache-tomcat-8.5.43 /apps/tomcat && chown -R tomcat:tomcat /apps /data
  • Build the image and push it to the harbor registry
nerdctl build -t harbor.chu.net/baseimages/tomcat-base:v8.5.43 .
nerdctl push harbor.chu.net/baseimages/tomcat-base:v8.5.43

5.1.3 Build the tomcat app1 business image

  • Write the Dockerfile
#tomcat web1
FROM harbor.chu.net/baseimages/tomcat-base:v8.5.43

ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD server.xml /apps/tomcat/conf/server.xml
ADD app1.tar.gz /data/tomcat/webapps/myapp/
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh

RUN chown -R nginx:nginx /data/ /apps/ && chmod a+x /apps/tomcat/bin/catalina.sh /apps/tomcat/bin/run_tomcat.sh

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]
  • Build the image and push it to the harbor registry
nerdctl build -t harbor.chu.net/web/tomcat-app1:v1 .
nerdctl push harbor.chu.net/web/tomcat-app1:v1

5.1.4 Run tomcat on kubernetes

Create the namespace web in advance.

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-tomcat-app1-deployment-label
  name: web-tomcat-app1-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: web-tomcat-app1-selector
    spec:
      containers:
      - name: web-tomcat-app1-container
        image: harbor.chu.net/web/tomcat-app1:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-tomcat-app1-service-label
  name: web-tomcat-app1-service
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30030
  selector:
    app: web-tomcat-app1-selector

Access test

[root@k8s-master2 tomcat-app1]#kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
web-tomcat-app1-deployment-5587f96995-x25d6   1/1     Running   0          14s
[root@k8s-master2 tomcat-app1]#kubectl get svc -n web
NAME                      TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
web-tomcat-app1-service   NodePort   10.100.185.255   <none>        80:30030/TCP   48s

5.2 Nginx

5.2.1 Build the nginx base image

  • Write the Dockerfile
FROM harbor.chu.net/baseimages/centos-base:7.9.2009

RUN yum install -y vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop
ADD nginx-1.22.0.tar.gz /usr/local/src/
RUN cd /usr/local/src/nginx-1.22.0 && ./configure  && make && make install && ln -sv /usr/local/nginx/sbin/nginx /usr/sbin/nginx  &&rm -rf /usr/local/src/nginx-1.22.0.tar.gz 
  • Build the image and push it to the local harbor registry
nerdctl build -t harbor.chu.net/baseimages/nginx-base:1.22.0 .
nerdctl push harbor.chu.net/baseimages/nginx-base:1.22.0

5.2.2 Build the nginx business image

  • Write the Dockerfile
FROM harbor.chu.net/baseimages/nginx-base:1.22.0

ADD nginx.conf /usr/local/nginx/conf/nginx.conf
ADD app1.tar.gz  /usr/local/nginx/html/webapp/
ADD index.html  /usr/local/nginx/html/index.html

# mount points for static assets
RUN mkdir -p /usr/local/nginx/html/webapp/static /usr/local/nginx/html/webapp/images
EXPOSE 80 443

CMD ["nginx"]

nginx.conf

[root@k8s-master2 nginx]#egrep -v '^\s*#|^$' nginx.conf 
user  nginx nginx;
worker_processes  auto;
daemon off;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;
    upstream  tomcat_webserver {
        server  web-tomcat-app1-service.web.svc.cluster.local:80; 
    }
    server {
        listen       80;
        server_name  localhost;
        location / {
            root   html;
            index  index.html index.htm;
        }
        location /webapp {
            root   html;
            index  index.html index.htm;
        }
        location /myapp {
             proxy_pass  http://tomcat_webserver;
             proxy_set_header   Host    $host;
             proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
             proxy_set_header X-Real-IP $remote_addr;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

app1.tar.gz

[root@k8s-master2 webapp]#cat index.html 
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Devops</title>
</head>
<body>
<h1>web devops v11111111</h1>
</body>
</html>

[root@k8s-master2 webapp]#tar czvf app1.tar.gz index.html

index.html

[root@k8s-master2 nginx]#cat index.html 
nginx web1 v1
  • Build the image and push it to the local harbor registry
nerdctl build -t harbor.chu.net/web/nginx-web1:v1 .
nerdctl push harbor.chu.net/web/nginx-web1:v1

5.2.3 Test the business image

[root@k8s-master2 nginx]#nerdctl run -it -p 80:80 harbor.chu.net/web/nginx-web1:v1 bash
# a standalone test cannot resolve the Service name, so add a hosts entry
[root@9d2ce970a25c /]# echo "127.0.0.1 web-tomcat-app1-service.web.svc.cluster.local" >> /etc/hosts
[root@9d2ce970a25c /]# nginx &
[1] 68

# request the default page
[root@9d2ce970a25c /]# curl 127.0.0.1
nginx web1 v1

# request /webapp
[root@9d2ce970a25c /]# curl -L 127.0.0.1/webapp
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Devops</title>
</head>
<body>
<h1>web devops v11111111</h1>
</body>
</html>

5.2.4 nginx + tomcat static/dynamic separation on kubernetes

Write the test yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-nginx-deployment-label
  name: web-nginx-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-nginx-selector
  template:
    metadata:
      labels:
        app: web-nginx-selector
    spec:
      containers:
      - name: web-nginx-container
        image: harbor.chu.net/web/nginx-web1:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "20"

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-nginx-app1-service-label
  name: web-nginx-app1-service
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30031
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30032
  selector:
    app: web-nginx-selector
  • Verification
[root@k8s-master2 nginx]#kubectl get svc -n web
NAME                      TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
web-nginx-app1-service    NodePort   10.100.141.39   <none>        80:30031/TCP,443:30032/TCP   12s
web-tomcat-app1-service   NodePort   10.100.16.87    <none>        80:30030/TCP                 22s

[root@k8s-master2 nginx]#kubectl get ep -n web
NAME                      ENDPOINTS                              AGE
web-nginx-app1-service    10.200.169.175:443,10.200.169.175:80   16s
web-tomcat-app1-service   10.200.107.233:8080                    26s

[root@k8s-master2 nginx]#kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
web-nginx-deployment-65c95bc6bf-vr2rs         1/1     Running   0          42s
web-tomcat-app1-deployment-5587f96995-ktzzx   1/1     Running   0          52s
[root@k8s-master2 nginx]#

Access from a browser

  • the default page

  • webapp/

Static files

  • myapp/

Dynamic requests are forwarded to the tomcat application

Contents of the index.jsp test file

<!DOCTYPE html>
<html>
<head>
    <title>JSP - Hello World</title>
</head>
<body>
<h1><%= "Hello World!" %>
</h1>
<br/>
<a href="/tomcatTest">Hello Servlet</a>
</body>
</html>

5.3 NFS

5.3.1 Shared data via NFS

NFS server (10.0.0.101) configuration

# create the directories
mkdir -p /data/k8sdata/web/images /data/k8sdata/web/static
# add the export entry
cat /etc/exports
...
/data/k8sdata *(rw,sync,no_root_squash,no_subtree_check)

# apply the configuration
[root@harbor1 data]#exportfs -r
# check the exports
[root@harbor1 data]#showmount -e 10.0.0.101
Export list for 10.0.0.101:
/data/k8sdata *
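Before wiring the export into the pods, it can be mounted manually from any node as a quick check (an optional step, assuming an NFS client such as nfs-common is installed on the node):

# mount the export temporarily, list it, then unmount
mount -t nfs 10.0.0.101:/data/k8sdata/web /mnt
ls /mnt
umount /mnt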

Add the NFS volumes to the tomcat deployment

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-tomcat-app1-deployment-label
  name: web-tomcat-app1-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-tomcat-app1-selector
  template:
    metadata:
      labels:
        app: web-tomcat-app1-selector
    spec:
      containers:
      - name: web-tomcat-app1-container
        image: harbor.chu.net/web/tomcat-app1:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "18"
        #resources:
        #  limits:
        #    cpu: 1
        #    memory: "512Mi"
        #  requests:
        #    cpu: 500m
        #    memory: "512Mi"
        volumeMounts:
        - name: web-images
          mountPath: /usr/local/nginx/html/webapp/images
          readOnly: false
        - name: web-static
          mountPath: /usr/local/nginx/html/webapp/static
          readOnly: false
      volumes:
      - name: web-images
        nfs:
          server: 10.0.0.101			# NFS server
          path: /data/k8sdata/web/images
      - name: web-static
        nfs:
          server: 10.0.0.101
          path: /data/k8sdata/web/static

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-tomcat-app1-service-label
  name: web-tomcat-app1-service
  namespace: web
spec:
  #type: NodePort			# no host port exposed
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    #nodePort: 30030			# no host port exposed
  selector:
    app: web-tomcat-app1-selector

Add the NFS volumes to the nginx deployment

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: web-nginx-deployment-label
  name: web-nginx-deployment
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-nginx-selector
  template:
    metadata:
      labels:
        app: web-nginx-selector
    spec:
      containers:
      - name: web-nginx-container
        image: harbor.chu.net/web/nginx-web1:v1
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http
        - containerPort: 443
          protocol: TCP
          name: https
        env:
        - name: "password"
          value: "123456"
        - name: "age"
          value: "20"
        resources:
          limits:
            cpu: 500m
            memory: 512Mi
          requests:
            cpu: 500m
            memory: 256Mi
        volumeMounts:
        - name: web-images
          mountPath: /usr/local/nginx/html/webapp/images
          readOnly: false
        - name: web-static
          mountPath: /usr/local/nginx/html/webapp/static
          readOnly: false
      volumes:
      - name: web-images
        nfs:
          server: 10.0.0.101
          path: /data/k8sdata/web/images 
      - name: web-static
        nfs:
          server: 10.0.0.101
          path: /data/k8sdata/web/static

  

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: web-nginx-app1-service-label
  name: web-nginx-app1-service
  namespace: web
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30031
  - name: https
    port: 443
    protocol: TCP
    targetPort: 443
    nodePort: 30032
  selector:
    app: web-nginx-selector

Check

[root@k8s-master2 tomcat-app1]#kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
web-nginx-app1-service    NodePort    10.100.70.126    <none>        80:30031/TCP,443:30032/TCP   19m
web-tomcat-app1-service   ClusterIP   10.100.240.205   <none>        80/TCP                       31m

Configure the load balancer

Add keepalived VIP 10.0.0.35

cat /etc/keepalived/keepalived.conf
...
    virtual_ipaddress {
        10.0.0.10/24 dev eth0 label eth0:0
        10.0.0.35/24 dev eth0 label eth0:2
    }

Configure haproxy

cat /etc/haproxy/haproxy.cfg
...
# add the following configuration
listen web_nginx_http_80
    bind 10.0.0.35:80
    mode tcp
    server 10.0.0.11 10.0.0.11:30031 check inter 3s fall 3 rise 5
    server 10.0.0.12 10.0.0.12:30031 check inter 3s fall 3 rise 5
    server 10.0.0.13 10.0.0.13:30031 check inter 3s fall 3 rise 5
    server 10.0.0.41 10.0.0.41:30031 check inter 3s fall 3 rise 5
    server 10.0.0.42 10.0.0.42:30031 check inter 3s fall 3 rise 5
    server 10.0.0.43 10.0.0.43:30031 check inter 3s fall 3 rise 5


listen web_nginx_https_443
    bind 10.0.0.35:443
    mode tcp
    #balance source
    server 10.0.0.11 10.0.0.11:30032 check inter 3s fall 3 rise 5
    server 10.0.0.12 10.0.0.12:30032 check inter 3s fall 3 rise 5
    server 10.0.0.13 10.0.0.13:30032 check inter 3s fall 3 rise 5
    server 10.0.0.41 10.0.0.41:30032 check inter 3s fall 3 rise 5
    server 10.0.0.42 10.0.0.42:30032 check inter 3s fall 3 rise 5
    server 10.0.0.43 10.0.0.43:30032 check inter 3s fall 3 rise 5


5.3.2 Generate data on the backend and verify access

Prepare the data

# generate data under the NFS directories /data/k8sdata/web/images and /data/k8sdata/web/static
# download an image (1.jpg) into /data/k8sdata/web/images
[root@harbor1 static]#echo 'web static test' > /data/k8sdata/web/static/1.html

[root@harbor1 static]#tree /data/k8sdata/web/
/data/k8sdata/web/
├── images
│   └── 1.jpg
└── static
    └── 1.html

2 directories, 2 files

Verify access

Access the default page

Nginx forwards to the tomcat page

Access the image served from the NFS backend

Access the HTML page served from the NFS backend
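
The same checks can be run from the command line against the VIP. The URL paths below assume the image serves /usr/local/nginx/html as its document root, matching the volumeMounts above; the tomcat forwarding path depends on the nginx location configuration baked into nginx-web1:v1:

curl -I http://10.0.0.35/                        # default page
curl -I http://10.0.0.35/webapp/images/1.jpg     # image served from NFS
curl    http://10.0.0.35/webapp/static/1.html    # expect: web static test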

6. Run a ZooKeeper cluster

6.1 Build the image

Prepare the Java base image

nerdctl pull elevy/slim_java:8
nerdctl tag elevy/slim_java:8 harbor.chu.net/baseimages/slim_java:8
nerdctl push harbor.chu.net/baseimages/slim_java:8

Write the Dockerfile (the build context must also contain the files referenced below: repositories, zookeeper-3.4.14.tar.gz and its .asc signature, KEYS, the conf/ directory, bin/zkReady.sh and entrypoint.sh)

FROM harbor.chu.net/baseimages/slim_java:8

ENV ZK_VERSION 3.4.14
ADD repositories /etc/apk/repositories 
# Download Zookeeper
COPY zookeeper-3.4.14.tar.gz /tmp/zk.tgz
COPY zookeeper-3.4.14.tar.gz.asc /tmp/zk.tgz.asc
COPY KEYS /tmp/KEYS
RUN apk add --no-cache --virtual .build-deps \
      ca-certificates   \
      gnupg             \
      tar               \
      wget &&           \
    #
    # Install dependencies
    apk add --no-cache 	\
      bash &&           \
    #
    #
    # Verify the signature
    export GNUPGHOME="$(mktemp -d)" && \
    gpg -q --batch --import /tmp/KEYS && \
    gpg -q --batch --no-auto-key-retrieve --verify /tmp/zk.tgz.asc /tmp/zk.tgz && \
    #
    # Set up directories
    #
    mkdir -p /zookeeper/data /zookeeper/wal /zookeeper/log && \
    #
    # Install
    tar -x -C /zookeeper --strip-components=1 --no-same-owner -f /tmp/zk.tgz && \
    #
    # Slim down
    cd /zookeeper && \
    cp dist-maven/zookeeper-${ZK_VERSION}.jar . && \
    rm -rf \
      *.txt \
      *.xml \
      bin/README.txt \
      bin/*.cmd \
      conf/* \
      contrib \
      dist-maven \
      docs \
      lib/*.txt \
      lib/cobertura \
      lib/jdiff \
      recipes \
      src \
      zookeeper-*.asc \
      zookeeper-*.md5 \
      zookeeper-*.sha1 && \
    #
    # Clean up
    apk del .build-deps && \
    rm -rf /tmp/* "$GNUPGHOME"

COPY conf /zookeeper/conf/
COPY bin/zkReady.sh /zookeeper/bin/
COPY entrypoint.sh /
RUN  chmod a+x /zookeeper/bin/zkReady.sh /entrypoint.sh

ENV PATH=/zookeeper/bin:${PATH} \
    ZOO_LOG_DIR=/zookeeper/log \
    ZOO_LOG4J_PROP="INFO, CONSOLE, ROLLINGFILE" \
    JMXPORT=9010

ENTRYPOINT [ "/entrypoint.sh" ]

CMD [ "zkServer.sh", "start-foreground" ]

EXPOSE 2181 2888 3888 9010

Build and push the image

nerdctl build -t harbor.chu.net/web/zookeeper:v3.4.14 .
nerdctl push harbor.chu.net/web/zookeeper:v3.4.14

Test the image

bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower

6.2 Create the PVs

Create the data directories on the NFS server (10.0.0.101)

mkdir -p /data/k8sdata/web/zookeeper-datadir-1
mkdir -p /data/k8sdata/web/zookeeper-datadir-2
mkdir -p /data/k8sdata/web/zookeeper-datadir-3
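
If the parent directory /data/k8sdata is not already exported, the new paths must be covered by an entry in /etc/exports and the export table refreshed (a sketch; the export options are only an example, adjust to your environment):

# /etc/exports entry (example, only if not already present):
#   /data/k8sdata *(rw,no_root_squash)
exportfs -r
exportfs -v | grep k8sdata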

Write the YAML file

apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-1
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce 
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/zookeeper-datadir-1 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-2
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101
    path: /data/k8sdata/web/zookeeper-datadir-2 

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-pv-3
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.0.101 
    path: /data/k8sdata/web/zookeeper-datadir-3
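
Apply the manifest (the file name zookeeper-persistentvolume.yaml is only an example):

kubectl apply -f zookeeper-persistentvolume.yaml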

Check

[root@k8s-master2 pv]#kubectl get pv
NAME                     CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE
zookeeper-datadir-pv-1   20Gi       RWO            Retain           Available                                   14s
zookeeper-datadir-pv-2   20Gi       RWO            Retain           Available                                   14s
zookeeper-datadir-pv-3   20Gi       RWO            Retain           Available                                   14s

6.3 Create the PVCs

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-1
  namespace: web
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-1
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-2
  namespace: web
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-pvc-3
  namespace: web
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-pv-3
  resources:
    requests:
      storage: 10Gi
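
Apply the manifest (again, the file name is only an example):

kubectl apply -f zookeeper-persistentvolumeclaim.yaml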

Check

[root@k8s-master2 pv]#kubectl get pvc -n web
NAME                      STATUS   VOLUME                   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
zookeeper-datadir-pvc-1   Bound    zookeeper-datadir-pv-1   20Gi       RWO                           18s
zookeeper-datadir-pvc-2   Bound    zookeeper-datadir-pv-2   20Gi       RWO                           17s
zookeeper-datadir-pvc-3   Bound    zookeeper-datadir-pv-3   20Gi       RWO                           17s

6.4 Create the ZooKeeper cluster

apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: web
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
  namespace: web
spec:
  type: NodePort    
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
  namespace: web
spec:
  type: NodePort    
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
  namespace: web
spec:
  type: NodePort    
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper1
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-1 
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-1
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper2
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-2 
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-2
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: zookeeper3
  namespace: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      containers:
        - name: server
          image: harbor.chu.net/web/zookeeper:v3.4.14
          imagePullPolicy: Always
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-pvc-3
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir:
            medium: Memory
        - name: zookeeper-datadir-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-pvc-3
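
Apply the manifest and wait for the three deployments to come up (the file name zookeeper.yaml is only an example):

kubectl apply -f zookeeper.yaml
kubectl -n web get deploy | grep zookeeper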

6.5 Verify the cluster status

Check the services

[root@k8s-master2 zookeeper]#kubectl get svc -n web
NAME                      TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
zookeeper                 ClusterIP   10.100.100.250   <none>        2181/TCP                                       53s
zookeeper1                NodePort    10.100.56.175    <none>        2181:32181/TCP,2888:57732/TCP,3888:62659/TCP   53s
zookeeper2                NodePort    10.100.233.22    <none>        2181:32182/TCP,2888:55598/TCP,3888:51327/TCP   53s
zookeeper3                NodePort    10.100.38.10     <none>        2181:32183/TCP,2888:56595/TCP,3888:51777/TCP   53s

Check pod status and logs

[root@k8s-master2 zookeeper]#kubectl get pod -n web
NAME                                          READY   STATUS    RESTARTS   AGE
zookeeper1-56679f8f44-xh2hh                   1/1     Running   0          34s
zookeeper2-5cd9f77979-xbsmt                   1/1     Running   0          34s
zookeeper3-5b75f6546b-d6k8q                   1/1     Running   0          34s

Check the ZooKeeper status inside the containers

# enter a container and check the cluster role
[root@k8s-master2 zookeeper]#kubectl exec -it zookeeper1-56679f8f44-xh2hh -n web bash
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: follower			# cluster role: leader (master) or follower
   
bash-4.3# cat /zookeeper/conf/zoo.cfg 
tickTime=10000				# heartbeat interval in milliseconds (set to 10000 here)
initLimit=10
syncLimit=5
dataDir=/zookeeper/data
dataLogDir=/zookeeper/wal
#snapCount=100000
autopurge.purgeInterval=1
clientPort=2181
quorumListenOnAllIPs=true
server.1=zookeeper1:2888:3888
server.2=zookeeper2:2888:3888
server.3=zookeeper3:2888:3888
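
The cluster roles can also be checked from outside the cluster through the NodePort services, using ZooKeeper's four-letter-word commands (enabled by default in 3.4.x); 10.0.0.41 is one of the node IPs and nc is assumed to be installed:

echo srvr | nc 10.0.0.41 32181    # zookeeper1
echo srvr | nc 10.0.0.41 32182    # zookeeper2
echo srvr | nc 10.0.0.41 32183    # zookeeper3
# the "Mode:" line in each reply shows which instance is the leader and which are followers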

Write data into the cluster

#!/usr/bin/env python
# kazoo 2.9.0 has a known issue; install the pinned version kazoo==2.8.0
from kazoo.client import KazooClient

# ZooKeeper server address and NodePort
zk = KazooClient(hosts='10.0.0.41:32183')

# open the connection
zk.start()

# makepath=True creates missing parent nodes recursively
zk.create('/web/kafka/nodes/id-1', b'10.0.0.41', makepath=True)

# list the children of the root node
node = zk.get_children('/')
print(node)

# close the connection
zk.stop()
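
The znode created by the script can also be inspected with the zkCli.sh client shipped in the image (the pod name is taken from the kubectl get pod output above):

kubectl -n web exec -it zookeeper1-56679f8f44-xh2hh -- /zookeeper/bin/zkCli.sh -server 127.0.0.1:2181
# inside the client shell:
#   ls /web/kafka/nodes
#   get /web/kafka/nodes/id-1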

Open the ZooInspector GUI tool to inspect the data

View the written data
