k8s DNS

1. kube-dns

Two DNS add-ons are commonly used today: kube-dns and CoreDNS. Both resolve the Service names of a Kubernetes cluster to their corresponding IP addresses.

Full DNS name of a Service: <service-name>.<namespace>.svc.zzhz.local (zzhz.local is this cluster's domain).
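For example, the built-in kubernetes API Service in the default namespace gets the name that the sidecar health probes also query later in this manifest. A minimal check from inside any pod, once cluster DNS is running:

# Service "kubernetes" in namespace "default" with cluster domain "zzhz.local":
#   kubernetes.default.svc.zzhz.local
nslookup kubernetes.default.svc.zzhz.local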



#Create test pods to check the DNS server address and search domains (domain names cannot be pinged yet because cluster DNS has not been deployed)
kubectl run net-test1 --image=alpine --replicas=4 sleep 360000 

[root@localhost7G ~]# docker exec  -it k8s_net-test1_net-test1-5fcc69db59-v7zqg_  sh

/ # cat /etc/resolv.conf 
nameserver 10.10.0.2
search default.svc.zzhz.local. svc.zzhz.local. zzhz.local. localdomain
options ndots:5
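The search list together with ndots:5 means any name with fewer than five dots is first tried with each cluster suffix appended, before being queried as an absolute name. A minimal illustration of the expansion order (these lookups only succeed after kube-dns or CoreDNS from the sections below is deployed):

# "kube-dns.kube-system" is expanded through the search list in order:
#   kube-dns.kube-system.default.svc.zzhz.local   (no such record)
#   kube-dns.kube-system.svc.zzhz.local           (matches -> Service IP 10.10.0.2)
# so Services in other namespaces are reachable as <name>.<namespace>:
nslookup kube-dns.kube-system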


/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=5.307 ms
64 bytes from 223.6.6.6: seq=1 ttl=127 time=7.684 ms
64 bytes from 223.6.6.6: seq=2 ttl=127 time=6.144 ms

/ # ping www.qq.com
ping: bad address 'www.qq.com'



The kube-dns pod runs three containers (see the check below):
kubedns:  resolves Service-name DNS records
dnsmasq:  provides a DNS cache, reducing the load on kubedns and improving performance
sidecar:  periodically checks the health of kubedns and dnsmasq
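Once the manifest below is applied, the three containers can be confirmed from the running pod; a minimal sketch using the k8s-app=kube-dns label from the manifest:

# Expected output: kubedns dnsmasq sidecar
kubectl get pods -n kube-system -l k8s-app=kube-dns \
  -o jsonpath='{.items[0].spec.containers[*].name}'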


#Prepare the images: load, re-tag, and push them to the local Harbor registry
docker load  -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz 
docker load  -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz 
docker load  -i k8s-dns-sidecar-amd64_1.14.13.tar.gz 
docker tag 333fb0833870  harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13
docker tag 82f954458b31  harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13
docker tag 7b15476a7228  harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13
docker push harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13 
docker push harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13 
docker push harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13 

#Review the manifest
[root@localhost7C k8s]# cat kube-dns.yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.0.2  # cluster DNS IP; matches the nameserver seen in the test pod's /etc/resolv.conf above
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. So that the Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      securityContext:
        supplementalGroups: [ 65534 ]
        fsGroup: 65534
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: harbor.zzhz.com/baseimage/k8s-dns-kube-dns-amd64:1.14.13  # kubedns image pushed above
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 512Mi    # resource limit
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=zzhz.local.  # cluster domain set during the binary installation
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: harbor.zzhz.com/baseimage/k8s-dns-dnsmasq-nanny-amd64:1.14.13  # dnsmasq image pushed above (repo path matches the tag pushed earlier)
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/zzhz.local/127.0.0.1#10053   # delegate the zzhz.local domain to 127.0.0.1 port 10053 (kubedns)
        #- --server=/zjol.com/6.6.6.6#10053      # example: delegate the zjol.com domain to 6.6.6.6 port 10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: harbor.zzhz.com/baseimage/k8s-dns-sidecar-amd64:1.14.13  # sidecar image pushed above
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.zzhz.local,5,SRV  # probe kubedns every 5s with an SRV query
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.zzhz.local,5,SRV     # probe dnsmasq every 5s with an SRV query
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.
      serviceAccountName: kube-dns


#Deploy
 kubectl apply -f kube-dns.yaml
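
Before testing resolution, a quick check that the add-on came up (a sketch using the labels and Service from the manifest above):

# The kube-dns pod should show 3/3 containers ready (kubedns, dnsmasq, sidecar)
kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
kubectl get svc  -n kube-system kube-dns   # ClusterIP should be 10.10.0.2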
 
 #Test
 [root@localhost7G ~]# docker exec  -it k8s_net-test1_net-test1-5fcc69db59-v7zqg_  sh
 / # ping www.qq.com
PING www.qq.com (101.91.42.232): 56 data bytes
64 bytes from 101.91.42.232: seq=0 ttl=127 time=12.003 ms
64 bytes from 101.91.42.232: seq=1 ttl=127 time=12.686 ms
64 bytes from 101.91.42.232: seq=2 ttl=127 time=12.697 ms
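
External names now resolve through dnsmasq's upstream forwarding; internal Service names can be verified from the same test pod (commands only, exact output depends on the cluster):

/ # nslookup kubernetes.default.svc.zzhz.local     # answered by kubedns via dnsmasq
/ # nslookup kube-dns.kube-system.svc.zzhz.local   # the DNS Service itself, 10.10.0.2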

 ---------------------------------------------------------------
 

 

2. CoreDNS

CoreDNS replaces the three kube-dns containers with a single coredns container; the manifest:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:       # sets the cluster domain and forwarders
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        # zones served by the kubernetes plugin: cluster domain and reverse lookup zones
        kubernetes zzhz.local in-addr.arpa ip6.arpa {  
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        # forward queries that cannot be resolved in-cluster
        forward . 223.6.6.6
        #forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: k8s-app
                operator: In
                values: ["kube-dns"]
            topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.6.7  # image
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 700Mi  # resource limit
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.10.0.2  # cluster DNS IP (same address as the kube-dns Service above)
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
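
Deploying and verifying CoreDNS follows the same pattern as kube-dns above. A minimal sketch, assuming the manifest is saved as coredns.yaml (the file name is an assumption) and the net-test1 pods from earlier still exist:

# If kube-dns from the previous section is still running, delete its Deployment
# first so that only CoreDNS serves the 10.10.0.2 Service.
kubectl apply -f coredns.yaml

kubectl get pods -n kube-system -l k8s-app=kube-dns
kubectl get svc  -n kube-system kube-dns

# Resolution test from inside one of the test pods
kubectl exec -it <net-test1-pod-name> -- nslookup kubernetes.default.svc.zzhz.local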

 

From: https://www.cnblogs.com/Yuanbangchen/p/17219145.html
