1. Cluster Information
OS: Ubuntu 22.04
Kernel: 5.15.0-105-generic
containerd version: 1.6.31
Kubernetes version: 1.28.9
Network plugin: Cilium v1.15.3
Service load balancer: MetalLB v0.14.5
1.1 Node Preparation
k8s-cilium-master-01   172.16.88.61   4 vCPU   8G RAM   50G disk
k8s-cilium-master-02   172.16.88.62   4 vCPU   8G RAM   50G disk
k8s-cilium-master-03   172.16.88.63   4 vCPU   8G RAM   50G disk
k8s-cilium-node-01     172.16.88.71   4 vCPU   8G RAM   50G disk
k8s-cilium-node-02     172.16.88.72   4 vCPU   8G RAM   50G disk
k8s-cilium-node-03     172.16.88.73   4 vCPU   8G RAM   50G disk
k8s-cilium-node-04     172.16.88.74   4 vCPU   8G RAM   50G disk
k8s-cilium-node-05     172.16.88.75   4 vCPU   8G RAM   50G disk
1.2 Initial Node Setup
Change the hostnames in bulk with Ansible.
[root@cyh-dell-rocky9-02 ~]# cat /etc/ansible/hosts
[vm1]
172.16.88.61 hostname=k8s-cilium-master-01 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.62 hostname=k8s-cilium-master-02 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.63 hostname=k8s-cilium-master-03 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.71 hostname=k8s-cilium-node-01 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.72 hostname=k8s-cilium-node-02 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.73 hostname=k8s-cilium-node-03 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.74 hostname=k8s-cilium-node-04 ansible_ssh_port=22 ansible_ssh_pass=redhat
172.16.88.75 hostname=k8s-cilium-node-05 ansible_ssh_port=22 ansible_ssh_pass=redhat
[vm]
172.16.88.61
172.16.88.62
172.16.88.63
172.16.88.71
172.16.88.72
172.16.88.73
172.16.88.74
172.16.88.75
[root@cyh-dell-rocky9-02 ~]#
[root@cyh-dell-rocky9-02 ~]# cat name.yml
---
- hosts: vm1
  remote_user: root
  tasks:
    - name: persist the new hostname
      raw: "echo {{ hostname | quote }} > /etc/hostname"
    - name: apply the new hostname
      shell: hostname {{ hostname | quote }}
[root@cyh-dell-rocky9-02 ~]#
Change the hostnames in bulk: ansible-playbook ./name.yml
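To confirm the rename took effect everywhere, a quick ad-hoc check could look like this (a sketch, assuming the [vm] inventory group defined above):

ansible 'vm' -m shell -a 'hostname'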
Push the SSH key from the master.
root@k8s-cilium-master-01:~# ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
root@k8s-cilium-master-01:~# for i in {62,63,71,72,73,74,75}; do sshpass -p 'redhat' ssh-copy-id -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa -p 22 root@172.16.88.$i; done
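Passwordless login can then be verified from master01; a minimal sketch (BatchMode makes ssh fail instead of prompting if the key was not accepted):

for i in {62,63,71,72,73,74,75}; do ssh -o BatchMode=yes root@172.16.88.$i hostname; done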
Initialize the nodes. Save the following script as base.sh:
cat >> /etc/hosts <<EOF
172.16.88.61 k8s-cilium-master-01
172.16.88.62 k8s-cilium-master-02
172.16.88.63 k8s-cilium-master-03
172.16.88.71 k8s-cilium-node-01
172.16.88.72 k8s-cilium-node-02
172.16.88.73 k8s-cilium-node-03
172.16.88.74 k8s-cilium-node-04
172.16.88.75 k8s-cilium-node-05
EOF
swapoff -a
sed -i 's@/swap.img@#/swap.img@g' /etc/fstab
rm -fr /swap.img
systemctl mask swap.img.swap
ufw disable
apt -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt update && apt-get install containerd.io
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay && modprobe br_netfilter
# sysctl params required by setup, params persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
ansible 'vm' -m script -a "./base.sh"
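Once the script has run on every node, a hedged spot-check that swap is off and IP forwarding is on (again assuming the [vm] inventory; swapon prints nothing when no swap is active):

ansible 'vm' -m shell -a 'swapon --show; sysctl net.ipv4.ip_forward'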
1.3 Install and Configure containerd
First, generate and save the default configuration:

~# mkdir /etc/containerd
~# containerd config default > /etc/containerd/config.toml

Next, edit the generated file and make the following changes.

Switch containerd to the systemd cgroup driver:

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

Point containerd at a domestic mirror for the pause image, pinned to the required version:

[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"

Configure registry mirrors to speed up image pulls:

[plugins."io.containerd.grpc.v1.cri".registry]
  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
      endpoint = ["https://docker.mirrors.ustc.edu.cn", "https://registry.docker-cn.com"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
      endpoint = ["https://registry.aliyuncs.com/google_containers"]
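The first two edits can also be applied non-interactively; a minimal sed sketch against the stock default config (the mirror entries add new lines, so they are easier to edit by hand or to template wholesale, as done below):

sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml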
The complete config.toml:
root@k8s-cilium-master-01:~# cat /etc/containerd/config.toml
disabled_plugins = []
imports = []
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

  [plugins."io.containerd.gc.v1.scheduler"]
    deletion_threshold = 0
    mutation_threshold = 100
    pause_threshold = 0.02
    schedule_delay = "0s"
    startup_delay = "100ms"

  [plugins."io.containerd.grpc.v1.cri"]
    device_ownership_from_security_context = false
    disable_apparmor = false
    disable_cgroup = false
    disable_hugetlb_controller = true
    disable_proc_mount = false
    disable_tcp_service = true
    drain_exec_sync_io_timeout = "0s"
    enable_selinux = false
    enable_tls_streaming = false
    enable_unprivileged_icmp = false
    enable_unprivileged_ports = false
    ignore_deprecation_warnings = []
    ignore_image_defined_volumes = false
    max_concurrent_downloads = 3
    max_container_log_line_size = 16384
    netns_mounts_under_state_dir = false
    restrict_oom_score_adj = false
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"
    selinux_category_range = 1024
    stats_collect_period = 10
    stream_idle_timeout = "4h0m0s"
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    systemd_cgroup = false
    tolerate_missing_hugetlb_controller = true
    unset_seccomp_profile = ""

    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "/etc/cni/net.d"
      conf_template = ""
      ip_pref = ""
      max_conf_num = 1

    [plugins."io.containerd.grpc.v1.cri".containerd]
      default_runtime_name = "runc"
      disable_snapshot_annotations = true
      discard_unpacked_layers = false
      ignore_rdt_not_enabled_errors = false
      no_pivot = false
      snapshotter = "overlayfs"

      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          base_runtime_spec = ""
          cni_conf_dir = ""
          cni_max_conf_num = 0
          container_annotations = []
          pod_annotations = []
          privileged_without_host_devices = false
          runtime_engine = ""
          runtime_path = ""
          runtime_root = ""
          runtime_type = "io.containerd.runc.v2"

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true

      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        base_runtime_spec = ""
        cni_conf_dir = ""
        cni_max_conf_num = 0
        container_annotations = []
        pod_annotations = []
        privileged_without_host_devices = false
        runtime_engine = ""
        runtime_path = ""
        runtime_root = ""
        runtime_type = ""

        [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

    [plugins."io.containerd.grpc.v1.cri".image_decryption]
      key_model = "node"

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://docker.mirrors.ustc.edu.cn", "https://registry.docker-cn.com"]

        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
          endpoint = ["https://registry.aliyuncs.com/google_containers"]

    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""

  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"

  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"

  [plugins."io.containerd.internal.v1.tracing"]
    sampling_ratio = 1.0
    service_name = "containerd"

  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"

  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false

  [plugins."io.containerd.runtime.v1.linux"]
    no_shim = false
    runtime = "runc"
    runtime_root = ""
    shim = "containerd-shim"
    shim_debug = false

  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
    sched_core = false

  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]

  [plugins."io.containerd.service.v1.tasks-service"]
    rdt_config_file = ""

  [plugins."io.containerd.snapshotter.v1.aufs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.btrfs"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.devmapper"]
    async_remove = false
    base_image_size = ""
    discard_blocks = false
    fs_options = ""
    fs_type = ""
    pool_name = ""
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.native"]
    root_path = ""

  [plugins."io.containerd.snapshotter.v1.overlayfs"]
    mount_options = []
    root_path = ""
    sync_remove = false
    upperdir_label = false

  [plugins."io.containerd.snapshotter.v1.zfs"]
    root_path = ""

  [plugins."io.containerd.tracing.processor.v1.otlp"]
    endpoint = ""
    insecure = false
    protocol = ""

[proxy_plugins]

[stream_processors]

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar"

  [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
    accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
    args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
    env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
    path = "ctd-decoder"
    returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts]
  "io.containerd.timeout.bolt.open" = "0s"
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[ttrpc]
  address = ""
  gid = 0
  uid = 0
root@k8s-cilium-master-01:~#
Finally, sync the file to the other nodes and restart the containerd service.
~# for i in {62,63,71,72,73,74,75}; do scp /etc/containerd/config.toml root@172.16.88.$i:/etc/containerd/; done
~# systemctl daemon-reload && systemctl restart containerd
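Note that the systemctl line above only restarts containerd on the local node; the remote nodes need the same restart. One way to do it, leaning on the SSH keys pushed earlier (a sketch, not from the original):

for i in {62,63,71,72,73,74,75}; do ssh root@172.16.88.$i 'systemctl daemon-reload && systemctl restart containerd'; done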
Configure the crictl client.
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: true
EOF
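With this file in place, crictl talks to containerd without extra flags. A few standard subcommands for a quick sanity check (debug: true is verbose; many setups prefer false for day-to-day use):

crictl info      # runtime status as reported over the CRI
crictl ps -a     # all containers, including exited ones
crictl images    # images pulled through the CRI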
1.4 Install and Configure Kubernetes
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
apt-get update && apt-get install -y kubelet kubeadm kubectl

Prevent kubelet, kubeadm, and kubectl from being upgraded automatically:

apt-mark hold kubelet kubeadm kubectl
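Before initializing, it is worth confirming that every node ended up on the same version; a sketch using the Ansible inventory from section 1.2:

ansible 'vm' -m shell -a 'kubeadm version -o short; kubelet --version'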
Configure the Kubernetes control-plane endpoint as a hostname rather than an IP, which makes high availability easier to set up later.
root@k8s-cilium-master-01:~# cat /etc/hosts
172.16.88.61 k8s-cilium-master-01 kubeapi.k8s.com
root@k8s-cilium-master-01:~#
root@k8s-cilium-master-01:~# for i in {62,63,71,72,73,74,75}; do scp /etc/hosts root@172.16.88.$i:/etc; done
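A quick check that the endpoint resolves on every node (again a sketch assuming the [vm] inventory):

ansible 'vm' -m shell -a 'getent hosts kubeapi.k8s.com'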
1.5 Initialize Kubernetes
kubeadm init \
  --control-plane-endpoint="kubeapi.k8s.com" \
  --kubernetes-version=v1.28.9 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --image-repository=registry.aliyuncs.com/google_containers \
  --cri-socket unix:///var/run/containerd/containerd.sock \
  --upload-certs
Initialization output:
root@k8s-cilium-master-01:~# kubeadm init \
  --control-plane-endpoint="kubeapi.k8s.com" \
  --kubernetes-version=v1.28.9 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --image-repository=registry.aliyuncs.com/google_containers \
  --cri-socket unix:///var/run/containerd/containerd.sock \
  --upload-certs
[init] Using Kubernetes version: v1.28.9
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-cilium-master-01 kubeapi.k8s.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.88.61]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-cilium-master-01 localhost] and IPs [172.16.88.61 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-cilium-master-01 localhost] and IPs [172.16.88.61 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 9.005007 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key: ba8510df5514ef3178b1d08293f7d4d7a725fdabacc1602a03587c1a8ee244ec
[mark-control-plane] Marking the node k8s-cilium-master-01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-cilium-master-01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 6i1bqc.gk7sf1alyiunuzry
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join kubeapi.k8s.com:6443 --token 6i1bqc.gk7sf1alyiunuzry \
    --discovery-token-ca-cert-hash sha256:77df5c95946f6467da3c9fe2c5360887290f6322d04c40d525970eebea814655 \
    --control-plane --certificate-key ba8510df5514ef3178b1d08293f7d4d7a725fdabacc1602a03587c1a8ee244ec

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join kubeapi.k8s.com:6443 --token 6i1bqc.gk7sf1alyiunuzry \
    --discovery-token-ca-cert-hash sha256:77df5c95946f6467da3c9fe2c5360887290f6322d04c40d525970eebea814655
root@k8s-cilium-master-01:~#
Sync the certificate keys to master02 and master03.
Create the etcd pki directory on master02 and master03:

mkdir -p /etc/kubernetes/pki/etcd/

Copy the keys:

scp /etc/kubernetes/pki/ca.* 172.16.88.62:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.* 172.16.88.63:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* 172.16.88.62:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* 172.16.88.63:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* 172.16.88.62:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* 172.16.88.63:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* 172.16.88.62:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.* 172.16.88.63:/etc/kubernetes/pki/etcd/

# Join master02 and master03 as control-plane nodes
kubeadm join kubeapi.k8s.com:6443 --token 6i1bqc.gk7sf1alyiunuzry \
  --discovery-token-ca-cert-hash sha256:77df5c95946f6467da3c9fe2c5360887290f6322d04c40d525970eebea814655 \
  --control-plane --certificate-key ba8510df5514ef3178b1d08293f7d4d7a725fdabacc1602a03587c1a8ee244ec

Join the worker nodes:

kubeadm join kubeapi.k8s.com:6443 --token 6i1bqc.gk7sf1alyiunuzry \
  --discovery-token-ca-cert-hash sha256:77df5c95946f6467da3c9fe2c5360887290f6322d04c40d525970eebea814655
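The bootstrap token expires after 24 hours and, as the init output warns, the uploaded certificates are deleted after two. If a node joins later, fresh credentials can be generated on master01 with the standard kubeadm subcommands:

kubeadm token create --print-join-command
kubeadm init phase upload-certs --upload-certs   # prints a new --certificate-key for control-plane joins

With the cluster formed, set up kubectl access on master01: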
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
All nodes report NotReady and the CoreDNS Pods stay Pending at this point; that is expected, because no CNI plugin has been installed yet.

root@k8s-cilium-master-01:~# kubectl get node -owide
NAME                   STATUS     ROLES           AGE    VERSION   INTERNAL-IP    EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION       CONTAINER-RUNTIME
k8s-cilium-master-01   NotReady   control-plane   10m    v1.28.9   172.16.88.61   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-master-02   NotReady   control-plane   2m8s   v1.28.9   172.16.88.62   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-master-03   NotReady   control-plane   73s    v1.28.9   172.16.88.63   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-node-01     NotReady   <none>          74s    v1.28.9   172.16.88.71   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-node-02     NotReady   <none>          72s    v1.28.9   172.16.88.72   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-node-03     NotReady   <none>          67s    v1.28.9   172.16.88.73   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-node-04     NotReady   <none>          65s    v1.28.9   172.16.88.74   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
k8s-cilium-node-05     NotReady   <none>          62s    v1.28.9   172.16.88.75   <none>        Ubuntu 22.04.4 LTS   5.15.0-105-generic   containerd://1.6.31
root@k8s-cilium-master-01:~#
root@k8s-cilium-master-01:~# kubectl get pod -A
NAMESPACE     NAME                                           READY   STATUS    RESTARTS        AGE
kube-system   coredns-66f779496c-j68dq                       0/1     Pending   0               10m
kube-system   coredns-66f779496c-khhw5                       0/1     Pending   0               10m
kube-system   etcd-k8s-cilium-master-01                      1/1     Running   7               10m
kube-system   etcd-k8s-cilium-master-02                      1/1     Running   0               2m25s
kube-system   etcd-k8s-cilium-master-03                      1/1     Running   0               70s
kube-system   kube-apiserver-k8s-cilium-master-01            1/1     Running   7               10m
kube-system   kube-apiserver-k8s-cilium-master-02            1/1     Running   0               2m24s
kube-system   kube-controller-manager-k8s-cilium-master-01   1/1     Running   8 (2m14s ago)   10m
kube-system   kube-controller-manager-k8s-cilium-master-02   1/1     Running   0               2m24s
kube-system   kube-controller-manager-k8s-cilium-master-03   1/1     Running   0               18s
kube-system   kube-proxy-4l9s5                               1/1     Running   0               91s
kube-system   kube-proxy-5fg8w                               1/1     Running   0               84s
kube-system   kube-proxy-67z5q                               1/1     Running   0               79s
kube-system   kube-proxy-8668p                               1/1     Running   0               89s
kube-system   kube-proxy-d28rk                               1/1     Running   0               10m
kube-system   kube-proxy-lztwh                               1/1     Running   0               2m25s
kube-system   kube-proxy-qcl2x                               1/1     Running   0               82s
kube-system   kube-proxy-xwp2p                               1/1     Running   0               90s
kube-system   kube-scheduler-k8s-cilium-master-01            1/1     Running   8 (2m9s ago)    10m
kube-system   kube-scheduler-k8s-cilium-master-02            1/1     Running   0               2m24s
kube-system   kube-scheduler-k8s-cilium-master-03            1/1     Running   0               29s
root@k8s-cilium-master-01:~#
1.6 Install the Cilium Network Plugin
root@k8s-cilium-master-01:~# wget https://github.com/cilium/cilium-cli/releases/download/v0.16.4/cilium-linux-amd64.tar.gz
root@k8s-cilium-master-01:~# tar -xf cilium-linux-amd64.tar.gz && mv cilium /usr/local/bin/
root@k8s-cilium-master-01:~# cilium version
cilium-cli: v0.16.4 compiled with go1.22.1 on linux/amd64
cilium image (default): v1.15.3
cilium image (stable): unknown
cilium image (running): unknown. Unable to obtain cilium version. Reason: release: not found
root@k8s-cilium-master-01:~#
root@k8s-cilium-master-01:~# cilium status
    /¯¯\
 /¯¯\__/¯¯\    Cilium:            1 errors
 \__/¯¯\__/    Operator:          disabled
 /¯¯\__/¯¯\    Envoy DaemonSet:   disabled (using embedded mode)
 \__/¯¯\__/    Hubble Relay:      disabled
    \__/       ClusterMesh:       disabled

Containers:           cilium
                      cilium-operator
Cluster Pods:         0/2 managed by Cilium
Helm chart version:
Errors:               cilium    cilium    daemonsets.apps "cilium" not found
status check failed: [daemonsets.apps "cilium" not found]
root@k8s-cilium-master-01:~#

The errors here are expected: the cilium DaemonSet does not exist yet because nothing has been installed.
# Configure Cilium in VXLAN tunnel mode, with the ingress controller and Hubble enabled
cilium install \
  --set kubeProxyReplacement=true \
  --set ingressController.enabled=true \
  --set ingressController.loadbalancerMode=dedicated \
  --set ipam.mode=kubernetes \
  --set routingMode=tunnel \
  --set tunnelProtocol=vxlan \
  --set ipam.operator.clusterPoolIPv4PodCIDRList=10.244.0.0/16 \
  --set ipam.operator.clusterPoolIPv4MaskSize=24 \
  --set hubble.enabled="true" \
  --set hubble.listenAddress=":4244" \
  --set hubble.relay.enabled="true" \
  --set hubble.ui.enabled="true" \
  --set prometheus.enabled=true \
  --set operator.prometheus.enabled=true \
  --set hubble.metrics.port=9665 \
  --set hubble.metrics.enableOpenMetrics=true \
  --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,httpV2:exemplars=true;labelsContext=source_ip\,source_namespace\,source_workload\,destination_ip\,destination_namespace\,destination_workload\,traffic_direction}"

Note that Helm value keys are case-sensitive: the mask-size key must be written ipam.operator.clusterPoolIPv4MaskSize (lower-case "operator" and "clusterPool"), or it is silently ignored.
Installation output:
root@k8s-cilium-master-01:~# cilium install \
  --set kubeProxyReplacement=true \
  --set ingressController.enabled=true \
  --set ingressController.loadbalancerMode=dedicated \
  --set ipam.mode=kubernetes \
  --set routingMode=tunnel \
  --set tunnelProtocol=vxlan \
  --set ipam.operator.clusterPoolIPv4PodCIDRList=10.244.0.0/16 \
  --set ipam.operator.clusterPoolIPv4MaskSize=24 \
  --set hubble.enabled="true" \
  --set hubble.listenAddress=":4244" \
  --set hubble.relay.enabled="true" \
  --set hubble.ui.enabled="true" \
  --set prometheus.enabled=true \
  --set operator.prometheus.enabled=true \
  --set hubble.metrics.port=9665 \
  --set hubble.metrics.enableOpenMetrics=true \
  --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,httpV2:exemplars=true;labelsContext=source_ip\,source_namespace\,source_workload\,destination_ip\,destination_namespace\,destination_workload\,traffic_direction}"
ℹ️ Using Cilium version 1.15.3
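Once the install returns, the earlier checks can be repeated with the standard cilium-cli subcommands (the connectivity test deploys temporary test Pods and takes several minutes):

cilium status --wait        # blocks until all Cilium components report ready
cilium connectivity test    # optional end-to-end datapath verification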