1. kube-dns
The two DNS add-ons in common use are kube-dns and coredns; they resolve a service name in the Kubernetes cluster to its corresponding IP address.
The full DNS name of a service is: <service name>.<namespace name>.svc.zzhz.local

#Create a test pod and check its DNS address and search domains (external domain names cannot be pinged yet because DNS has not been set up)
kubectl run net-test1 --image=alpine --replicas=4 sleep 360000

[root@localhost7G ~]# docker exec -it k8s_net-test1_net-test1-5fcc69db59-v7zqg_ sh
/ # cat /etc/resolv.conf
nameserver 10.10.0.2
search default.svc.zzhz.local. svc.zzhz.local. zzhz.local. localdomain
options ndots:5
/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=5.307 ms
64 bytes from 223.6.6.6: seq=1 ttl=127 time=7.684 ms
64 bytes from 223.6.6.6: seq=2 ttl=127 time=6.144 ms
/ # ping www.qq.com
ping: bad address 'www.qq.com'

kube-dns components:
kube-dns:     resolves service name domains
dns-dnsmasq:  provides DNS caching, lowering the load on kubedns and improving performance
dns-sidecar:  periodically checks the health of kubedns and dnsmasq

#Prepare and push the images
docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
docker tag 333fb0833870 harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13
docker tag 82f954458b31 harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13
docker tag 7b15476a7228 harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13
docker push harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13
docker push harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13
docker push harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13

#Review the manifest
[root@localhost7C k8s]# cat kube-dns.yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.0.2          # the nameserver address seen in the test pod's /etc/resolv.conf
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      securityContext:
        supplementalGroups: [ 65534 ]
        fsGroup: 65534
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 512Mi       # resource limits
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=zzhz.local.          # the cluster domain chosen during the binary installation
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/zzhz.local/127.0.0.1#10053      # queries for the zzhz.local domain are handed to 127.0.0.1 port 10053
        #- --server=/zjol.com/6.6.6.6#10053         # queries for the zjol.com domain would be handed to 6.6.6.6 port 10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.zzhz.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.zzhz.local,5,SRV
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns

#Deploy
kubectl apply -f kube-dns.yaml

#Test: the external domain name now resolves
[root@localhost7G ~]# docker exec -it k8s_net-test1_net-test1-5fcc69db59-v7zqg_ sh
/ # ping www.qq.com
PING www.qq.com (101.91.42.232): 56 data bytes
64 bytes from 101.91.42.232: seq=0 ttl=127 time=12.003 ms
64 bytes from 101.91.42.232: seq=1 ttl=127 time=12.686 ms
64 bytes from 101.91.42.232: seq=2 ttl=127 time=12.697 ms
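With kube-dns running, the <service name>.<namespace name>.svc.zzhz.local naming scheme described above can also be checked directly. A minimal sketch, assuming the net-test1 pod created earlier is still present (the pod name suffix and the returned addresses will differ in your cluster):

#Resolve the apiserver service by its full name (the same name used by the sidecar probes above)
kubectl exec -it net-test1-5fcc69db59-v7zqg -- nslookup kubernetes.default.svc.zzhz.local

#The search list in /etc/resolv.conf also lets the short name work from inside a pod
kubectl exec -it net-test1-5fcc69db59-v7zqg -- nslookup kubernetes

#Confirm that the kube-dns pod with its three containers (kubedns, dnsmasq, sidecar) is up
kubectl get pods -n kube-system -l k8s-app=kube-dns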
2. coredns
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:                           # sets the cluster domain name and the upstream forwarder
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        # cluster domain name
        kubernetes zzhz.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        # queries that cannot be resolved in-cluster are forwarded upstream
        forward . 223.6.6.6
        #forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values: ["kube-dns"]
            topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.6.7           # image location
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 700Mi                      # resource limits
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.0.2          # cluster DNS IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
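Deployment and verification mirror the kube-dns steps. The CoreDNS manifest reuses the same Service name (kube-dns) and clusterIP (10.10.0.2), so running pods need no changes to their /etc/resolv.conf. A minimal sketch, assuming the manifest above is saved as coredns.yaml and the kube-dns add-on from the previous section was removed first (kubectl delete -f kube-dns.yaml):

#Deploy and watch the pod come up
kubectl apply -f coredns.yaml
kubectl get pods -n kube-system -l k8s-app=kube-dns

#In-cluster names are answered by the kubernetes plugin; everything else goes to 223.6.6.6 via the forward rule in the Corefile
kubectl exec -it net-test1-5fcc69db59-v7zqg -- nslookup kubernetes.default.svc.zzhz.local
kubectl exec -it net-test1-5fcc69db59-v7zqg -- ping -c 3 www.qq.com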