1 二进制部署 k8s 集群及升级
1.1 环境
haproxy1+keepalived1+harbor #10.0.0.77 haproxy2+keepalived2+nfs #10.0.0.17 master节点1 #10.0.0.7 master节点2 #10.0.0.27 master节点3 #10.0.0.37 master节点vip #10.0.0.248 node节点1 #10.0.0.47 node节点2 #10.0.0.57 node节点3 #10.0.0.67 etcd1 #10.0.0.87 etcd2 #10.0.0.97 etcd3 #10.0.0.107 ansible #10.0.0.7 ca #10.0.0.7 #各节点开启ipv4_forward、优化内核参数和资源限制、不使用swap、关闭selinux、关闭防火墙、时间同步 #各节点修改hosts文件 # vi /etc/hosts ... 10.0.0.7 master1.testou.com 10.0.0.27 master2.testou.com 10.0.0.37 master3.testou.com 10.0.0.47 node1.testou.com 10.0.0.57 node2.testou.com 10.0.0.67 node3.testou.com 10.0.0.87 etcd1.testou.com 10.0.0.97 etcd2.testou.com 10.0.0.107 etcd3.testou.com 10.0.0.77 haproxy1.testou.com 10.0.0.17 haproxy2.testou.com 10.0.0.77 harbor.testou.com 10.0.0.17 nfs.testou.com #安装haproxy+keepalived,vip为10.0.0.248:6443,haproxy调度master使用mode tcp
1.2 harbor-https配置
1.2.1 CA配置
#在ansible主机签发harbor证书 #生成证书脚本 # cat ca.sh #!/bin/bash DOMAIN="testou.com" NODE_NAME="$1" #证书subject C=CN ST=beijing L=beijing O=testou #证书有效期 VALID="36500" check_position_variable () { if ! [ $# -eq 1 ]; then echo -e "Usage: bash $0 <node_name>\nExample: bash $0 master1" exit 1 fi } check_ca () { if ! [ -d /etc/ca ] ; then mkdir /etc/ca create_ca fi } create_ca () { cd /etc/ca #同时生成CA私钥和CA自签名证书: #CA私钥参数:加密算法、不加密私钥、指定私钥名称(不指定为privkey.pem) #CA证书参数:subject、有效期、序列号、指定证书名称(不指定为标准输出) openssl req -x509 -newkey rsa:2048 -nodes -keyout ca.key.pem \ -subj /C=${C}/ST=${ST}/L=${L}/O=${O}/CN=ca.${DOMAIN} -days ${VALID} -set_serial 0 -out ca.cert.pem &>/dev/null check_cert_status ca.cert.pem } create_cert () { cd /etc/ca CERT_SERIAL=$(echo $(ls -l /etc/ca/*key* | wc -l)+1 | bc) #同时生成客户机私钥和证书申请文件: #client私钥参数:加密算法、不加密私钥、指定私钥名称 #证书申请文件参数:subject、指定证书申请文件名称 openssl req -newkey rsa:2048 -nodes -keyout ${NODE_NAME}.${DOMAIN}.key \ -subj /C=${C}/ST=${ST}/L=${L}/O=${O}/CN=${NODE_NAME}.${DOMAIN} -out ${NODE_NAME}.${DOMAIN}.csr &>/dev/null #颁发证书: #参数:指定证书申请文件 #指定CA证书、CA私钥文件 #有效期、序列号、指定证书名称 openssl x509 -req -in ${NODE_NAME}.${DOMAIN}.csr \ -CA ca.cert.pem -CAkey ca.key.pem \ -days ${VALID} -set_serial ${CERT_SERIAL} -out ${NODE_NAME}.${DOMAIN}.crt &>/dev/null check_cert_status ${NODE_NAME}.${DOMAIN}.crt } check_cert_status () { if [ $? -eq 0 ] ; then echo "$1 证书创建成功" else echo "$1 证书创建失败" fi } main () { check_position_variable $@ check_ca create_cert } main $@ #生成harbor证书 # bash ca.sh harbor # ls /etc/ca/harbor* /etc/ca/harbor.testou.com.crt /etc/ca/harbor.testou.com.csr /etc/ca/harbor.testou.com.key #将证书拷备到harbor主机 # ssh [email protected] "mkdir -p /usr/local/harbor/certs" # scp /etc/ca/harbor.testou.com.* [email protected]:/usr/local/harbor/certs
1.2.2 harbor 主机配置
#修改harbor配置文件 # cd /usr/local/harbor # vi /usr/local/harbor/harbor.cfg hostname = harbor.testou.com ui_url_protocol = https ssl_cert = /usr/local/harbor/certs/harbor.testou.com.crt ssl_cert_key = /usr/local/harbor/certs/harbor.testou.com.key harbor_admin_password = harbor # docker-compose down # docker-compose up -d
1.3 ansible 主机配置
1.3.1 配置ssh免密其他主机
#传送公钥脚本
# cat push_authorized_key_sshpass.sh
#!/bin/bash
# Push the local SSH public key to every host in IP_LIST non-interactively.
# Requires: sshpass (auto-installed via yum) and the remote root password
# exported through SSHPASS (consumed by `sshpass -e`).

# Renamed from USER: do not clobber the standard USER environment variable,
# which /etc/rc.d/init.d/functions and other tools may read.
SSH_USER="root"
export SSHPASS="root"   # remote password read by `sshpass -e`
#set_key_algorithm
ALGORITHM="rsa"
#ALGORITHM="dsa"
#ALGORITHM="ecdsa"
#ALGORITHM="ed25519"
IP_LIST="
10.0.0.7
10.0.0.17
10.0.0.27
10.0.0.37
10.0.0.47
10.0.0.57
10.0.0.67
10.0.0.77
10.0.0.87
10.0.0.97
10.0.0.107
"
# Provides the `action` status-line helper on RHEL/CentOS.
. /etc/rc.d/init.d/functions
check_sshpass () {
    # Bug fix: the original `if ! $(rpm -q …)` ran the (empty) *output* of rpm
    # through command substitution instead of testing rpm's exit status.
    if ! rpm -q sshpass &> /dev/null; then
        if ! yum -y install sshpass &> /dev/null; then
            echo "failed to install sshpass" >&2
            exit 1
        fi
    fi
}
check_ssh_key () {
    # Generate a key pair once; subsequent runs reuse the existing key.
    if ! [ -f "${HOME}/.ssh/id_${ALGORITHM}" ]; then
        # ssh-keygen does not create ~/.ssh itself.
        mkdir -p "${HOME}/.ssh" && chmod 700 "${HOME}/.ssh"
        ssh-keygen -t "${ALGORITHM}" -P '' -f "${HOME}/.ssh/id_${ALGORITHM}" &> /dev/null
    fi
}
push_ssh_key () {
    local ip
    # IP_LIST is intentionally unquoted: rely on word-splitting per address.
    for ip in ${IP_LIST}; do
        # Bug fix: ssh options must precede the destination host for ssh-copy-id.
        if sshpass -e ssh-copy-id -o StrictHostKeyChecking=no "${SSH_USER}@${ip}" &> /dev/null; then
            action "push ssh key to ${ip} success."
        else
            action "push ssh key to ${ip} failed." false
        fi
    done
}
main () {
    check_sshpass
    check_ssh_key
    push_ssh_key
}
main
#执行脚本
# bash push_authorized_key_sshpass.sh
1.3.2 master/node 节点安装 docker
#创建role # mkdir -p /etc/ansible/roles/docker-19.03.15/{files,tasks} #创建playbook文件 # cat /etc/ansible/roles/docker-19.03.15/docker-19.03.15.yml --- #binary install docker_19.03.15 - hosts: - master - node remote_user: root gather_facts: no serial: 10 roles: - role: docker-19.03.15 #上传docker二进制压缩包和安装脚本 # ls /etc/ansible/roles/docker-19.03.15/files/ binary_install_docker_19.03.15.sh docker-19.03.15.tgz #task文件 # cat /etc/ansible/roles/docker-19.03.15/tasks/main.yml --- - name: 发送docker二进制安装包 copy: src: binary_install_docker_19.03.15.sh dest: /usr/local/src - name: 发送docker安装脚本 copy: src: docker-19.03.15.tgz dest: /usr/local/src - name: 运行安装脚本 shell: chdir: /usr/local/src cmd: bash binary_install_docker_19.03.15.sh - name: 创建harbor证书目录 file: path: /etc/docker/certs.d/harbor.testou.com state: directory - name: 发送harbor证书 copy: src: /etc/ca/harbor.testou.com.crt dest: /etc/docker/certs.d/harbor.testou.com - name: 重启docker服务 systemd: name: docker state: restarted #安装脚本 # cat /etc/ansible/roles/docker-19.03.15/file/binary_install_docker_19.03.15.sh #!/bin/bash #下载解压二进制包 echo "下载解压二进制包" cd /usr/local/src/ tar xf docker-19.03.15.tgz -C /usr/local/ #软链接文件 echo "软链接文件" ln -s /usr/local/docker/* /usr/bin/ #准备containerd service文件 echo "准备containerd service文件" cat > /usr/lib/systemd/system/containerd.service <<EOF [Unit] Description=containerd container runtime Documentation=https://containerd.io After=network.target [Service] ExecStartPre=-/sbin/modprobe overlay ExecStart=/usr/bin/containerd KillMode=process Delegate=yes LimitNOFILE=1048576 LimitNPROC=infinity LimitCORE=infinity TasksMax=infinity [Install] WantedBy=multi-user.target EOF #准备docker service文件 echo "准备docker service文件" cat > /usr/lib/systemd/system/docker.service <<EOF [Unit] Description=Docker Application Container Engine Documentation=https://docs.docker.com BindsTo=containerd.service After=network-online.target firewalld.service containerd.service Wants=network-online.target Requires=docker.socket [Service] 
Type=notify ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --insecure-registry harbor.testou.com ExecReload=/bin/kill -s HUP $MAINPID TimeoutSec=0 RestartSec=2 Restart=always StartLimitBurst=3 StartLimitInterval=60s LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity TasksMax=infinity Delegate=yes KillMode=process [Install] WantedBy=multi-user.target EOF #准备docker socket文件 echo "准备docker socket文件" cat > /usr/lib/systemd/system/docker.socket <<EOF [Unit] Description=Docker Socket for the API PartOf=docker.service [Socket] ListenStream=/var/run/docker.sock SocketMode=0660 SocketUser=root SocketGroup=docker [Install] WantedBy=sockets.target EOF #镜像加速配置 [ -d /etc/docker ] || mkdir -p /etc/docker cat > /etc/docker/daemon.json <<EOF { "registry-mirrors": ["https://65ywp6un.mirror.aliyuncs.com"] } EOF #添加docker组 echo "添加docker组" groupadd -r docker #起服务 echo "起服务" systemctl enable --now containerd &> /dev/null systemctl enable --now docker &> /dev/null #清理安装目录 echo "清理安装目录" rm -rf /usr/local/src/binary_install_docker_19.03.15.sh /usr/local/src/docker-19.03.15.tgz #============================================================================= #修改ansible配置文件 # vi /etc/ansible/ansible.cfg host_key_checking = False #修改ansible主机列表 # vi /etc/ansible/hosts [master] 10.0.0.7 10.0.0.27 10.0.0.37 [node] 10.0.0.47 10.0.0.57 10.0.0.67 #执行playbook # ansible-playbook /etc/ansible/roles/docker-19.03.15/docker-19.03.15.yml
1.3.3 ansible 节点准备部署 role 依赖环境(github项目kubeasz)
#项目基于python2.7 # wget http://github.com/easzlab/kubeasz/releases/download/2.2.0/easzup # vi easzup ...... function download_all() { #install_docker && \ #跳过docker安装 get_kubeasz && \ get_k8s_bin && \ get_ext_bin && \ get_sys_pkg && \ get_offline_image } ...... # mv /etc/ansible/roles/docker-19.03.15/ /opt/ #备份docker安装role,执行脚本(easzup -D)将删除/etc/ansible # bash easzup -D #复制ansible主机模板文件并修改 # cp /etc/ansible/example/hosts.multi-node /etc/ansible/hosts # vi /etc/ansible/hosts [etcd] 10.0.0.87 NODE_NAME=etcd1 10.0.0.97 NODE_NAME=etcd2 10.0.0.107 NODE_NAME=etcd3 [kube-master] #配置2个master节点,后面测试新增master节点 10.0.0.7 10.0.0.27 [kube-node] #配置2个node节点,后面测试新增node节点 10.0.0.47 10.0.0.57 [ex-lb] 10.0.0.77 LB_ROLE=master EX_APISERVER_VIP=10.0.0.248 EX_APISERVER_PORT=6443 10.0.0.17 LB_ROLE=backup EX_APISERVER_VIP=10.0.0.248 EX_APISERVER_PORT=6443 [chrony] [all:vars] CONTAINER_RUNTIME="docker" CLUSTER_NETWORK="flannel" PROXY_MODE="ipvs" SERVICE_CIDR="192.168.0.0/20" CLUSTER_CIDR="172.31.0.0/16" NODE_PORT_RANGE="30000-60000" CLUSTER_DNS_DOMAIN="testou.com" bin_dir="/usr/bin" ca_dir="/etc/kubernetes/ssl" base_dir="/etc/ansible" #测试节点是否可达 # ansible all -m ping
1.4 环境初始化
# ansible-playbook /etc/ansible/01.prepare.yml #编译高版本pip # wget https://files.pythonhosted.org/packages/08/25/f204a6138dade2f6757b4ae99bc3994aac28a5602c97ddb2a35e0e22fbc4/pip-20.1.1.tar.gz # tar xf /usr/local/src/pip-20.1.1.tar.gz -C /usr/local # cd /usr/local/pip-20.1.1/ # python setup.py install
1.5 部署 etcd 集群
# ansible-playbook /etc/ansible/02.etcd.yml #在etcd节点验证集群可用性 # ETCD_LIST="10.0.0.87 10.0.0.97 10.0.0.107" # for i in ${ETCD_LIST}; do \ ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${i}:2379 \ --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem \ --key=/etc/etcd/ssl/etcd-key.pem endpoint health; \ done
1.6 部署 master 节点
# pip install netaddr # ansible-playbook /etc/ansible/04.kube-master.yml
1.7 部署 node 节点
#playbook包含一个底层镜像用来初始化pod网络,确认可下载 # grep -R SANDBOX_IMAGE /etc/ansible/roles/kube-node/ /etc/ansible/roles/kube-node/defaults/main.yml:SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1" #执行playbook后,会在node节点都装haproxy,是对k8s-master高可用的一种实现方式 不同于依赖集中式haproxy,集中式haproxy坏,影响全部node 分布式haproxy坏,只影响当前node,分担集中式haproxy压力 node节点的kubelet和kube-proxy访问master-api时,本机6443调度到master1/2/3:6443,间接和etcd通信 # ansible-playbook /etc/ansible/05.kube-node.yml # kubectl get node
1.8 部署网络服务 flannel
# ansible-playbook /etc/ansible/06.network.yml #创建pod进行网络测试 # kubectl run net-test1 --image=alpine --replicas 4 sleep 360000 # kubectl get pod -A -o wide # kubectl exec -it net-test1-5fcc69db59-46hzw sh / # ping 10.0.0.7 #可达 / # ping 223.5.5.5 #可达 / # ping www.baidu.com #不可达,未安装DNS服务
1.9 添加 master/node 节点
#添加master节点 #先配置ssh免密,安装docker环境 # sed -ri '/role.*CONTAINER_RUNTIME/s/(.*)/#\1/' /etc/ansible/tools/03.addmaster.yml #不检查docker/containerd是否安装 # easzctl add-master 10.0.0.37 #添加node节点 #先配置ssh免密,安装docker环境 # sed -ri '/role.*CONTAINER_RUNTIME/s/(.*)/#\1/' /etc/ansible/tools/02.addnode.yml #不检查docker/containerd是否安装 # easzctl add-node 10.0.0.67
1.10 升级至 k8s-v1.17.4 版本
#基于替换kubelet等二进制程序,重启服务 #备份旧版本二进制程序 # mkdir /opt/k8s-v1.17.2 # cp -R /etc/ansible/bin/* /opt/k8s-v1.17.2/ #下载解压新版本二进制程序 官方二进制包,https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.17.md#downloads-for-v1174 # cd /usr/local/src; ls kubernetes-client-linux-amd64.tar.gz kubernetes-node-linux-amd64.tar.gz kubernetes-server-linux-amd64.tar.gz kubernetes.tar.gz # tar xf kubernetes.tar.gz # tar xf kubernetes-server-linux-amd64.tar.gz # tar xf kubernetes-node-linux-amd64.tar.gz # tar tf kubernetes-client-linux-amd64.tar.gz # mkdir /opt/k8s-v1.17.4 # cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubelet,kube-proxy} /opt/k8s-v1.17.4 #替换旧版本二进制程序 # \cp -f kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kubelet,kube-proxy} /opt/k8s-v1.17.4 /etc/ansible/bin/ #执行升级k8s版本role # ansible-playbook -t upgrade_k8s /etc/ansible/22.upgrade.yml #验证当前版本 # kubectl get node
1.11 部署 dashboard-v2.0.0-rc6
# mkdir /etc/ansible/manifests/dashboard/2.0.0-rc6 # cd /etc/ansible/manifests/dashboard/2.0.0-rc6 # wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc6/aio/deploy/recommended.yaml #修改yml文件 # vi recommended.yaml --- kind: Service #修改service配置段 apiVersion: v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: type: NodePort #暴露端口 ports: - port: 443 targetPort: 8443 nodePort: 30002 #暴露端口 selector: k8s-app: kubernetes-dashboard --- kind: Deployment #修改dashboard-pod配置段 apiVersion: apps/v1 metadata: labels: k8s-app: kubernetes-dashboard name: kubernetes-dashboard namespace: kubernetes-dashboard spec: ...... spec: containers: - name: kubernetes-dashboard image: kubernetesui/dashboard:v2.0.0-rc6 imagePullPolicy: Always ports: - containerPort: 8443 protocol: TCP args: - --auto-generate-certificates - --namespace=kubernetes-dashboard - --token-ttl=7200 #dashboard前端自动注销时间,默认900s ...... # vi admin-user.yml --- apiVersion: v1 kind: ServiceAccount metadata: name: admin-user namespace: kubernetes-dashboard --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: admin-user roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount name: admin-user namespace: kubernetes-dashboard #如果namespace中存在kubernetes-dashboard则删除 # kubectl delete ns kubernetes-dashboard #创建资源 # kubectl apply -f /etc/ansible/manifests/dashboard/2.0.0-rc6 #浏览器访问:https://10.0.0.248:30002 #http不识别 #登录方式1.手动输入token # kubectl get secret -A | grep admin #找到secret名称 # kubectl describe secret admin-user-token-bfcgw -n kubernetes-dashboard #复制粘贴token后登录 #登录方式2.Kubeconfig #基于创建的kubeconfig文件登录dashboard # cp /root/.kube/config /usr/local/src/kubeconfig # sed -ri "/user:$/a\ token: $(kubectl describe secret $(kubectl get secret -A | awk '/admin/{print $2}') -n kubernetes-dashboard | awk 'END{print $2}')" /usr/local/src/kubeconfig # sz 
/usr/local/src/kubeconfig #选择kubeconfig文件后登录
1.12 部署 DNS 服务
1.12.1 CoreDNS-1.6.9
#github官方项目:github.com/coredns/coredns #项目中包含生成创建资源yml文件的脚本(deploy.sh),要求事先存在kube-dns,向kube-dns发起请求,抓取某些数据,生成创建资源yml文件 或直接用写好的coredns.yml创建dns资源对象 # mkdir /etc/ansible/manifests/dns # cd /etc/ansible/manifests/dns #上传文件夹,主要包含创建dns资源对象yml文件 #创建dns资源对象yml文件 # vi /etc/ansible/manifests/dns/CoreDNS/coredns.yml ...... --- apiVersion: apps/v1 kind: Deployment ...... spec: ...... containers: - name: coredns image: harbor.testou.com/k8s_base_images/coredns:1.6.7 #修改镜像地址为harbor ...... limits: memory: 512Mi #资源限制 --- apiVersion: v1 kind: ConfigMap ...... data: Corefile: | ...... kubernetes testou.com in-addr.arpa ip6.arpa { #改域名 ...... forward . 223.5.5.5 #无法解析的转发到bind/powerdns或公网dns-server #forward . /etc/resolv.conf ...... --- apiVersion: v1 kind: Service ...... spec: ...... clusterIP: 192.168.0.2 #service网段第2个地址,其他pod将dns-server指定为此地址 ...... #拉取镜像 # docker pull coredns/coredns:1.6.7 #重打tag号 # docker tag coredns/coredns:1.6.7 harbor.testou.com/k8s_base_images/coredns:1.6.7 #客户端登录(项目为公开时,下载无需登录,上传需登录) # docker login -u admin -p harbor harbor.testou.com #上传镜像至harbor # docker push harbor.testou.com/k8s_base_images/coredns:1.6.7 #创建dns资源 # kubectl apply -f /etc/ansible/manifests/dns/CoreDNS/coredns.yml #验证 #确认dns服务启动完成 # kubectl get pod -A | grep dns #进pod测试 # kubectl exec -it net-test1-5fcc69db59-46hzw sh / # ping 10.0.0.7 #可达 / # ping 223.5.5.5 #可达 / # ping www.baidu.com #可达 / # ping kubernetes #可达,同一namespace的service简写 / # ping dashboard-metrics-scraper #不可达,不同namespace的service简写 / # ping dashboard-metrics-scraper.kubernetes-dashboard.svc.testou.com #可达,不同namespace的service全称(svc简写.ns.svc.domain)
1.12.2 kube-dns-1.14.13
#谷歌镜像地址(国内不通):console.cloud.google.com/gcr/images/google-containers/GLOBAL # mkdir /etc/ansible/manifests/dns # cd /etc/ansible/manifests/dns #上传文件夹,主要包含镜像 #拷备dns资源对象文件并修改 # cp /usr/local/src/kubernetes/cluster/addons/dns/kube-dns/kube-dns.yaml.base /etc/ansible/manifests/dns/kube-dns/kube-dns.yml # vi /etc/ansible/manifests/dns/kube-dns/kube-dns.yml apiVersion: v1 kind: Service ...... spec: selector: k8s-app: kube-dns clusterIP: 192.168.0.2 #service网段第2个地址,其他pod将dns-server指定为此地址 ...... --- apiVersion: apps/v1 kind: Deployment ...... spec ...... containers: - name: kubedns image: harbor.testou.com/k8s_base_images/k8s-dns-kube-dns-amd64:1.14.13 #修改镜像地址为harbor resources: limits: memory: 512Mi #资源限制 ...... args: - --domain=testou.com. #修改域名后缀 ...... - name: dnsmasq image: harbor.testou.com/k8s_base_images/k8s-dns-dnsmasq-nanny-amd64:1.14.13 #修改镜像地址为harbor ...... args: ...... - --server=/testou.com/127.0.0.1#10053 #修改域名后缀 #- --server=/linux39.test/10.0.0.107#53 #无法解析的转发到bind/powerdns或公网dns-server ...... - name: sidecar image: harbor.testou.com/k8s_base_images/k8s-dns-sidecar-amd64:1.14.13 #修改镜像地址为harbor ...... args: ...... 
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.testou.com,5,SRV - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.testou.com,5,SRV #导入镜像 # docker load -i /etc/ansible/manifests/dns/kube-dns/k8s-dns-kube-dns-amd64_1.14.13.tar.gz # docker load -i /etc/ansible/manifests/dns/kube-dns/k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz # docker load -i /etc/ansible/manifests/dns/kube-dns/k8s-dns-sidecar-amd64_1.14.13.tar.gz #重打tag号 # docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 harbor.testou.com/k8s_base_images/k8s-dns-kube-dns-amd64:1.14.13 # docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 harbor.testou.com/k8s_base_images/k8s-dns-dnsmasq-nanny-amd64:1.14.13 # docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 harbor.testou.com/k8s_base_images/k8s-dns-sidecar-amd64:1.14.13 #客户端登录(项目为公开时,下载无需登录,上传需登录) # docker login --username=admin --password=harbor harbor.testou.com #上传镜像至harbor # docker push harbor.testou.com/k8s_base_images/k8s-dns-kube-dns-amd64:1.14.13 # docker push harbor.testou.com/k8s_base_images/k8s-dns-dnsmasq-nanny-amd64:1.14.13 # docker push harbor.testou.com/k8s_base_images/k8s-dns-sidecar-amd64:1.14.13 #创建dns资源 # kubectl apply -f /etc/ansible/manifests/dns/kube-dns/kube-dns.yml #验证 #确认dns服务启动完成 # kubectl get pod -A | grep dns #ready为3/3(1个pod内3个容器) #进pod测试 # kubectl exec -it net-test1-5fcc69db59-46hzw sh / # ping 10.0.0.7 #可达 / # ping 223.5.5.5 #可达 / # ping www.baidu.com #可达 / # ping kubernetes #可达,同一namespace的service简写 / # ping dashboard-metrics-scraper #不可达,不同namespace的service简写 / # ping dashboard-metrics-scraper.kubernetes-dashboard.svc.testou.com #可达,不同namespace的service全称(svc简写.ns.svc.domain)标签:10.0,19,etc,ansible,dns,docker,com From: https://www.cnblogs.com/ddjapfpapd/p/17477728.html