The YAML, pasted directly:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      tolerations:
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
      affinity:
        # Pod anti-affinity: each node can run at most one ZooKeeper pod.
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: "registry.k8s.io/kubernetes-zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        # The liveness and readiness probes are identical, but it is still
        # important to specify both: this guarantees that only healthy servers
        # in the ZooKeeper ensemble receive network traffic.
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      storageClassName: managed-nfs-storage  # use your own StorageClass name
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
```

Reference: https://kubernetes.io/zh-cn/docs/tutorials/stateful-application/zookeeper/

Adapt the manifest to your own environment. Since my cluster has 3 nodes and the master is tainted, I added a toleration. The default image cannot be pulled, so I used a widely downloaded mirror from Docker Hub; after pushing it to my own Harbor, the imagePullPolicy must be changed to IfNotPresent.

Verification
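A quick smoke test, following the official tutorial linked above. The file name zookeeper.yaml is my assumption; the exact output will vary with your cluster:

```bash
kubectl apply -f zookeeper.yaml

# The anti-affinity rule should place each pod on a different node.
kubectl get pods -l app=zk -o wide

# Each member gets a unique myid equal to its ordinal + 1.
for i in 0 1 2; do kubectl exec zk-$i -- cat /var/lib/zookeeper/data/myid; done

# The generated zoo.cfg should list all three servers via the zk-hs headless Service.
kubectl exec zk-0 -- cat /opt/zookeeper/conf/zoo.cfg
```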
The official tutorial uses a PodDisruptionBudget to protect the ensemble's leader election: with maxUnavailable: 1, at most one member may be taken down by a voluntary disruption at any time.
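A sketch of how to watch the PDB enforce this, where `<node-hosting-zk-0>` and `<node-hosting-zk-1>` are placeholders for your own node names:

```bash
kubectl get pdb zk-pdb
# With maxUnavailable: 1 and all three pods ready, ALLOWED DISRUPTIONS shows 1.

# Draining the node hosting zk-0 evicts that pod (on a 3-node cluster it then
# stays Pending, since the anti-affinity rule leaves it nowhere to reschedule).
kubectl drain <node-hosting-zk-0> --ignore-daemonsets --delete-emptydir-data

# While zk-0 is down, draining a second ZooKeeper node is refused with an error
# along the lines of "cannot evict pod as it would violate the pod's disruption budget".
kubectl drain <node-hosting-zk-1> --ignore-daemonsets --delete-emptydir-data
```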
Nodes 1 and 2 hold consistent data: the znode I created earlier on node 0 shows up on both.
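For reference, a minimal replication check in the style of the tutorial; the /hello path and "world" value are just example data:

```bash
# Write a znode through zk-0 ...
kubectl exec zk-0 -- zkCli.sh create /hello world

# ... then read it back from the other two members; both should return "world".
kubectl exec zk-1 -- zkCli.sh get /hello
kubectl exec zk-2 -- zkCli.sh get /hello
```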