1.部署Cilium网络组件
1.1 在k8s-master节点上,下载安装helm
# Download the Helm v3.15.2 binary release from the Huawei Cloud mirror
wget https://mirrors.huaweicloud.com/helm/v3.15.2/helm-v3.15.2-linux-amd64.tar.gz
# Unpack the release tarball (creates ./linux-amd64/)
tar -zxvf helm-v3.15.2-linux-amd64.tar.gz
# Install the helm binary onto PATH
cp linux-amd64/helm /usr/bin/
# helm version
version.BuildInfo{Version:"v3.15.2", GitCommit:"1a500d5625419a524fdae4b33de351cc4f58ec35", GitTreeState:"clean", GoVersion:"go1.22.4"}
1.2 在任意k8s-master节点上,添加cilium安装源并下载安装包
# 添加安装源
helm repo add cilium https://helm.cilium.io
# 下载安装包,运行pull命令后会获得最新版cilium的tar包
helm pull cilium/cilium
tar -xvf cilium-*.tar
# 修改image为国内源
sed -i "s#quay.io/#m.daocloud.io/quay.io/#g" cilium/values.yaml
1.3 在任意k8s-master节点上安装cilium
# helm install cilium ./cilium/ \
--namespace kube-system \
--set hubble.relay.enabled=true \
--set hubble.ui.enabled=true \
--set prometheus.enabled=true \
--set operator.prometheus.enabled=true \
--set hubble.enabled=true \
--set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"
注:如需开启IPv6可添加--set ipv6.enabled=true参数
# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system cilium-87hd8 1/1 Running 0 4m53s 192.168.83.221 k8s-node02 <none> <none>
kube-system cilium-9fdbh 1/1 Running 0 4m53s 192.168.83.220 k8s-node01 <none> <none>
kube-system cilium-operator-f45f4975f-f9q7p 1/1 Running 0 4m53s 192.168.83.220 k8s-node01 <none> <none>
kube-system cilium-operator-f45f4975f-gw5z6 1/1 Running 0 4m53s 192.168.83.221 k8s-node02 <none> <none>
kube-system hubble-relay-84849f9dd5-59zhs 1/1 Running 0 4m53s 172.31.0.28 k8s-node01 <none> <none>
kube-system hubble-ui-79b7f9f4b-ccdrh 2/2 Running 0 4m53s 172.31.0.209 k8s-node01 <none> <none>
# kubectl get all --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/cilium-87hd8 1/1 Running 0 4m20s
kube-system pod/cilium-9fdbh 1/1 Running 0 4m20s
kube-system pod/cilium-operator-f45f4975f-f9q7p 1/1 Running 0 4m20s
kube-system pod/cilium-operator-f45f4975f-gw5z6 1/1 Running 0 4m20s
kube-system pod/hubble-relay-84849f9dd5-59zhs 1/1 Running 0 4m20s
kube-system pod/hubble-ui-79b7f9f4b-ccdrh 2/2 Running 0 4m20s
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.66.0.1 <none> 443/TCP 136d
kube-system service/cilium-agent ClusterIP None <none> 9964/TCP 4m20s
kube-system service/hubble-metrics ClusterIP None <none> 9965/TCP 4m20s
kube-system service/hubble-peer ClusterIP 10.66.180.91 <none> 443/TCP 4m20s
kube-system service/hubble-relay ClusterIP 10.66.79.186 <none> 80/TCP 4m20s
kube-system service/hubble-ui ClusterIP 10.66.91.101 <none> 80/TCP 4m20s
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system daemonset.apps/cilium 2 2 2 2 2 kubernetes.io/os=linux 4m20s
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/cilium-operator 2/2 2 2 4m20s
kube-system deployment.apps/hubble-relay 1/1 1 1 4m20s
kube-system deployment.apps/hubble-ui 1/1 1 1 4m20s
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/cilium-operator-f45f4975f 2 2 2 4m20s
kube-system replicaset.apps/hubble-relay-84849f9dd5 1 1 1 4m20s
kube-system replicaset.apps/hubble-ui-79b7f9f4b 1 1 1 4m20s
# kubectl get apiservices.apiregistration.k8s.io
NAME SERVICE AVAILABLE AGE
v1. Local True 136d
v1.admissionregistration.k8s.io Local True 136d
v1.apiextensions.k8s.io Local True 136d
v1.apps Local True 136d
v1.authentication.k8s.io Local True 136d
v1.authorization.k8s.io Local True 136d
v1.autoscaling Local True 136d
v1.batch Local True 136d
v1.certificates.k8s.io Local True 136d
v1.coordination.k8s.io Local True 136d
v1.discovery.k8s.io Local True 136d
v1.events.k8s.io Local True 136d
v1.flowcontrol.apiserver.k8s.io Local True 136d
v1.networking.k8s.io Local True 136d
v1.node.k8s.io Local True 136d
v1.policy Local True 136d
v1.rbac.authorization.k8s.io Local True 136d
v1.scheduling.k8s.io Local True 136d
v1.storage.k8s.io Local True 136d
v1alpha1.admissionregistration.k8s.io Local True 136d
v1alpha1.authentication.k8s.io Local True 136d
v1alpha1.internal.apiserver.k8s.io Local True 136d
v1alpha1.networking.k8s.io Local True 136d
v1alpha1.storage.k8s.io Local True 136d
v1alpha2.resource.k8s.io Local True 136d
v1beta1.admissionregistration.k8s.io Local True 136d
v1beta1.authentication.k8s.io Local True 136d
v1beta3.flowcontrol.apiserver.k8s.io Local True 136d
v2.autoscaling Local True 136d
v2.cilium.io Local True 90m
v2alpha1.cilium.io Local True 90m
1.4 在任意k8s-master节点上安装cilium专属监控面板
下载部署文件
# Download the monitoring deployment manifest.
# NOTE: use raw.githubusercontent.com — the github.com/.../blob/... URL
# returns the HTML viewer page, not the raw YAML file.
wget https://raw.githubusercontent.com/cilium/cilium/main/examples/kubernetes/addons/prometheus/monitoring-example.yaml
# Rewrite docker.io image references to a China-accessible mirror
sed -i "s#docker.io/#dockerpull.com/#g" monitoring-example.yaml
# Prefix the bare prom/prometheus reference as well. Anchoring on "image: "
# prevents double-prefixing a reference the previous sed already rewrote
# (assumes the manifest spells it "image: prom/prometheus:v2.42.0" — verify).
sed -i "s#image: prom/prometheus:v2.42.0#image: dockerpull.com/prom/prometheus:v2.42.0#g" monitoring-example.yaml
部署监控
# kubectl apply -f monitoring-example.yaml
namespace/cilium-monitoring created
serviceaccount/prometheus-k8s created
configmap/grafana-config created
configmap/grafana-cilium-dashboard created
configmap/grafana-cilium-operator-dashboard created
configmap/grafana-hubble-dashboard created
configmap/grafana-hubble-l7-http-metrics-by-workload created
configmap/prometheus created
clusterrole.rbac.authorization.k8s.io/prometheus created
clusterrolebinding.rbac.authorization.k8s.io/prometheus created
service/grafana created
service/prometheus created
deployment.apps/grafana created
deployment.apps/prometheus created
# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cilium-monitoring grafana-74b486577f-7w978 1/1 Running 0 37s 172.31.1.205 k8s-node02 <none> <none>
cilium-monitoring prometheus-58668c58df-jt9sn 1/1 Running 0 37s 172.31.1.40 k8s-node02 <none> <none>
kube-system cilium-87hd8 1/1 Running 0 60m 192.168.83.221 k8s-node02 <none> <none>
kube-system cilium-9fdbh 1/1 Running 0 60m 192.168.83.220 k8s-node01 <none> <none>
kube-system cilium-operator-f45f4975f-f9q7p 1/1 Running 0 60m 192.168.83.220 k8s-node01 <none> <none>
kube-system cilium-operator-f45f4975f-gw5z6 1/1 Running 0 60m 192.168.83.221 k8s-node02 <none> <none>
kube-system coredns-78d4595769-gl8nx 1/1 Running 0 3h37m 172.31.1.225 k8s-node02 <none> <none>
kube-system hubble-relay-84849f9dd5-59zhs 1/1 Running 0 60m 172.31.0.28 k8s-node01 <none> <none>
kube-system hubble-ui-79b7f9f4b-ccdrh 2/2 Running 0 60m 172.31.0.209 k8s-node01 <none> <none>
1.5 在任意k8s-master节点上,将hubble-ui、grafana和prometheus的type修改为NodePort
# kubectl edit svc hubble-ui -n kube-system
# kubectl edit svc grafana -n cilium-monitoring
# kubectl edit svc prometheus -n cilium-monitoring
将type: ClusterIP修改为type: NodePort
# kubectl get svc -A | grep monitor
cilium-monitoring grafana NodePort 10.66.236.94 <none> 3000:32301/TCP 4h1m
cilium-monitoring prometheus NodePort 10.66.12.82 <none> 9090:30584/TCP 4h1m
# kubectl get svc -A | grep hubble
kube-system hubble-metrics ClusterIP None <none> 9965/TCP 5h1m
kube-system hubble-peer ClusterIP 10.66.180.91 <none> 443/TCP 5h1m
kube-system hubble-relay ClusterIP 10.66.79.186 <none> 80/TCP 5h1m
kube-system hubble-ui NodePort 10.66.91.101 <none> 80:32093/TCP 5h1m
2. 部署CoreDNS
2.1 在k8s-master节点上,创建CoreDNS配置文件
# Write the CoreDNS manifest. The heredoc delimiter is quoted ('EOF') so the
# YAML is written literally with no shell parameter/command expansion.
# NOTE: the original paste had all YAML indentation stripped, which produces
# an invalid manifest; indentation below is restored to the standard
# kubernetes CoreDNS addon layout.
cat > /etc/kubernetes/yaml/coredns.yaml << 'EOF'
# __MACHINE_GENERATED_WARNING__
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      containers:
      - name: coredns
        image: registry.aliyuncs.com/google_containers/coredns/coredns:v1.11.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - ALL
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.66.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF
2.2 应用CoreDNS配置文件
# kubectl apply -f /etc/kubernetes/yaml/coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
# kubectl get pod -n kube-system -o wide | grep coredns
coredns-78d4595769-gl8nx 1/1 Running 0 164m 172.31.1.225 k8s-node02 <none> <none>
# kubectl get all --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/cilium-87hd8 1/1 Running 0 4m20s
kube-system pod/cilium-9fdbh 1/1 Running 0 4m20s
kube-system pod/cilium-operator-f45f4975f-f9q7p 1/1 Running 0 4m20s
kube-system pod/cilium-operator-f45f4975f-gw5z6 1/1 Running 0 4m20s
kube-system pod/coredns-78d4595769-gl8nx 1/1 Running 0 162m
kube-system pod/hubble-relay-84849f9dd5-59zhs 1/1 Running 0 4m20s
kube-system pod/hubble-ui-79b7f9f4b-ccdrh 2/2 Running 0 4m20s
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.66.0.1 <none> 443/TCP 136d
kube-system service/cilium-agent ClusterIP None <none> 9964/TCP 4m20s
kube-system service/hubble-metrics ClusterIP None <none> 9965/TCP 4m20s
kube-system service/hubble-peer ClusterIP 10.66.180.91 <none> 443/TCP 4m20s
kube-system service/hubble-relay ClusterIP 10.66.79.186 <none> 80/TCP 4m20s
kube-system service/hubble-ui ClusterIP 10.66.91.101 <none> 80/TCP 4m20s
kube-system service/kube-dns ClusterIP 10.66.0.2 <none> 53/UDP,53/TCP,9153/TCP 135d
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system daemonset.apps/cilium 2 2 2 2 2 kubernetes.io/os=linux 4m20s
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/cilium-operator 2/2 2 2 4m20s
kube-system deployment.apps/coredns 1/1 1 1 135d
kube-system deployment.apps/hubble-relay 1/1 1 1 4m20s
kube-system deployment.apps/hubble-ui 1/1 1 1 4m20s
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/cilium-operator-f45f4975f 2 2 2 4m20s
kube-system replicaset.apps/coredns-78d4595769 1 1 1 135d
kube-system replicaset.apps/hubble-relay-84849f9dd5 1 1 1 4m20s
kube-system replicaset.apps/hubble-ui-79b7f9f4b 1 1 1 4m20s
2.3 验证DNS解析是否正常
# dig -t a www.sohu.com @10.66.0.2
; <<>> DiG 9.16.23 <<>> -t a www.sohu.com @10.66.0.2
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 64003
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1
;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 1408
;; QUESTION SECTION:
;www.sohu.com. IN A
;; ANSWER SECTION:
www.sohu.com. 20 IN CNAME www.sohu.com.dsa.dnsv1.com.
www.sohu.com.dsa.dnsv1.com. 20 IN CNAME best.sched.d0-dk.tdnsdp1.cn.
best.sched.d0-dk.tdnsdp1.cn. 20 IN A 123.125.46.250
;; Query time: 5 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Wed Jul 03 16:14:47 CST 2024
;; MSG SIZE rcvd: 138
标签:hubble,k8s,Kubernetes,--,system,1.29,io,kube,cilium From: https://www.cnblogs.com/cn-jasonho/p/18281134