K8s official documentation
Basic concepts
Nodes | Kubernetes (p2hp.com)
Use kubectl to view node status and other details:
kubectl describe node <node name>
Container status
To check the status of a Pod's containers, use
kubectl describe pod <pod name>
The output includes the status of each container in the Pod.
Pod manifest explained
Create a Pod: prepare the Pod's resource manifest
apiVersion: v1 # API version
kind: Pod # resource type; can be Pod, Deployment, StatefulSet, etc.
metadata: # metadata describing the Pod
  name: nginx-demo # the Pod's name
  namespace: 'test' # place the Pod in the namespace named test
  labels: # labels in key: value form; keys and values are up to you
    app: nginx-demo
    type: app
    version: 1.0.0
spec: # the Pod is created according to the desired state described here
  containers: # container configuration
  - name: nginx # container name
    image: nginx # image to use
    imagePullPolicy: IfNotPresent # image pull policy
    lifecycle:
      preStop:
        exec:
          command: ["/bin/sh","-c","sleep 50"]
      postStart:
        exec:
          command: ["/bin/sh","-c","echo '<h1>hello world</h1>' > /usr/share/nginx/html/index.html"]
    livenessProbe: # liveness probe
      httpGet: # probe method
        port: 80 # probe port
        path: /api/path # probe request path
      periodSeconds: 5 # probe interval
      timeoutSeconds: 10 # probe timeout
      failureThreshold: 5 # failures before the probe is considered failed
      successThreshold: 1 # successes before the probe is considered successful
    readinessProbe: # readiness probe
      httpGet: # probe method
        port: 80 # probe port
        path: /api/path # probe request path
      periodSeconds: 5
      timeoutSeconds: 10
      failureThreshold: 5
      successThreshold: 1
    startupProbe: # startup probe; a probe may define only ONE handler (exec, httpGet, or tcpSocket — see the tcpSocket sketch after this manifest)
      exec:
        command:
        - sh
        - -c
        - "sleep 2;echo 'hello' > /tmp/hello"
      # httpGet: # alternative handler; do not combine with exec
      #   port: 80
      #   path: /api/path
      periodSeconds: 5
      timeoutSeconds: 10
      failureThreshold: 5
      successThreshold: 1
    command: # command run when the container starts
    - nginx
    - -g
    - 'daemon off;' # i.e. nginx -g 'daemon off;'
    workingDir: /usr/share/nginx/html # working directory after the container starts
    ports: # container port configuration
    - name: http
      containerPort: 80 # a network port in a single container
      protocol: TCP
    env:
    - name: JVM_OPTS
      value: '-Xms128m -Xmx128m'
    resources:
      requests: # minimum resources required
        cpu: "100m" # at least 0.1 CPU core
        memory: "64Mi" # at least 64 MiB of memory
      limits: # maximum resources allowed
        cpu: "200m" # at most 0.2 CPU core
        memory: "128Mi" # at most 128 MiB of memory
  restartPolicy: Always
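Besides exec and httpGet, a probe can also use tcpSocket, which only checks that the port accepts a TCP connection — a minimal sketch:
livenessProbe:
  tcpSocket:
    port: 80 # succeeds as soon as a TCP connection to the port can be opened
  periodSeconds: 5
  failureThreshold: 5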
Creating the Pod, plus related commands
kubectl create -f pod.yaml
kubectl get pod -n test
kubectl describe pod nginx-demo -n test
kubectl get po -o wide -n test
kubectl edit po nginx-demo
kubectl get deploy -n kube-system
kubectl exec -it nginx-demo -c nginx -- cat /tmp/hello
kubectl cp xxxx.xxx nginx-demo:/usr/share/nginx/html
Resource scheduling
Labels and selectors
# add a label on the fly
kubectl label po <pod name> app=hello1 -n ns
# modify an existing label
kubectl label po <pod name> app=hello2 -n ns --overwrite
# selector: find pods matching a single label value
kubectl get po -A -l app=hello --show-labels
# show the labels of all pods in the namespace
kubectl get po -n ns --show-labels
# match multiple values
kubectl get po -A -l 'app in (value1,value2,value3)'
kubectl get po -A -l version=1,app=nginx
kubectl get po -l version!=1,app!=nginx
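A label is removed by appending a dash to its key, for example:
# remove the app label from the pod
kubectl label po <pod name> app- -n ns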
deployment
Create a Deployment directly with kubectl
kubectl create deploy nginx-deploy --image=nginx # create the deployment
kubectl get deploy # get deploy info
## actual output, showing the relationship between deploy, replicaset, and pod
[root@k8s-121 k8s-test]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 1/1 1 1 2m54s
[root@k8s-121 k8s-test]# kubectl get replicaset
NAME DESIRED CURRENT READY AGE
nginx-deploy-9cdd9dbdc 1 1 1 3m3s
[root@k8s-121 k8s-test]# kubectl get po
NAME READY STATUS RESTARTS AGE
nginx-deploy-9cdd9dbdc-wctv9 1/1 Running 0 3m17s
# view the deploy's YAML manifest
kubectl get deploy nginx-deploy -o yaml
Create a Deployment from a resource manifest
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nginx-deploy
name: nginx-deploy
namespace: test
spec:
replicas: 1
  revisionHistoryLimit: 10 # max number of old revisions kept for rollback; must not be 0 if you want to roll back
selector:
matchLabels:
app: nginx-deploy
strategy:
rollingUpdate:
      maxSurge: 25% # during a rolling update, how many pods (count or percentage) may exceed the desired replica count (see the worked example after this manifest)
      maxUnavailable: 25% # maximum proportion of pods that may be unavailable during the update
type: RollingUpdate
template:
metadata:
labels:
app: nginx-deploy
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: nginx
restartPolicy: Always
terminationGracePeriodSeconds: 30
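For example, with replicas: 4 and both values at 25%: maxSurge rounds up, so at most 1 extra pod (5 total) may exist during the update, while maxUnavailable rounds down, so at most 1 pod may be unavailable (at least 3 always serving).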
kubectl create -f deploy1.yaml # create the deployment
kubectl describe deploy nginx-deploy -n test
Rolling updates
kubectl scale deploy nginx-deploy --replicas=4 -n test # scale the deploy
kubectl apply -f deploy1.yaml --force # modify the deployment; changing anything under template: triggers a rolling update
kubectl edit deploy nginx-deploy -n test
kubectl rollout status deploy nginx-deploy -n test # watch the rolling update's progress
Rollback
kubectl get rs --show-labels -n test # rolling updates create multiple rs objects (each with a different template) used for rollback
NAME DESIRED CURRENT READY AGE LABELS
nginx-deploy-67c9bd59df 5 5 5 155m app=nginx-deploy,pod-template-hash=67c9bd59df
nginx-deploy-7b9cffd885 0 0 0 174m app=nginx-deploy,pod-template-hash=7b9cffd885
nginx-deploy-86b8db4fd8 0 0 0 22m app=nginx-deploy,pod-template-hash=86b8db4fd8
# experiment
# plan: update the image latest ---> 1.23.4, but it was mistyped as 1.234 and the update got stuck
kubectl set image deploy nginx-deploy nginx=nginx:1.234 --record -n test
[root@k8s-121 k8s-test]# kubectl get po -n test
NAME READY STATUS RESTARTS AGE
nginx-deploy-69f6b9c5dc-2q8gn 0/1 ErrImagePull 0 56s
nginx-deploy-7b9cffd885-d7mq4 1/1 Running 0 10m
nginx-deploy-7b9cffd885-nmdb9 1/1 Running 0 10m
nginx-deploy-7b9cffd885-qdngc 1/1 Running 0 3m45s
[root@k8s-121 k8s-test]# kubectl rollout status deploy nginx-deploy -n test
Waiting for deployment "nginx-deploy" rollout to finish: 1 out of 3 new replicas have been updated...
[root@k8s-121 k8s-test]# kubectl get rs -n test --show-labels
NAME DESIRED CURRENT READY AGE LABELS
nginx-deploy-67c9bd59df 0 0 0 174m app=nginx-deploy,pod-template-hash=67c9bd59df
nginx-deploy-69f6b9c5dc 1 1 0 2m47s app=nginx-deploy,pod-template-hash=69f6b9c5dc
nginx-deploy-7b9cffd885 3 3 3 3h13m app=nginx-deploy,pod-template-hash=7b9cffd885
nginx-deploy-86b8db4fd8 0 0 0 41m app=nginx-deploy,pod-template-hash=86b8db4fd8
[root@k8s-121 k8s-test]# kubectl get deploy -n test
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deploy 3/3 1 3 3h20m
# list the revisions available for rollback
[root@k8s-121 k8s-test]# kubectl rollout history deploy nginx-deploy -n test
deployment.extensions/nginx-deploy
REVISION CHANGE-CAUSE
3 <none>
4 <none>
5 <none>
6 <none>
# inspect the content of a specific revision
kubectl rollout history deploy nginx-deploy --revision=5 -n test
# roll back; without --to-revision it defaults to the previous revision
[root@k8s-121 k8s-test]# kubectl rollout undo deploy nginx-deploy --to-revision=5 -n test
deployment.extensions/nginx-deploy rolled back
# the pods are healthy again; the failing pod was removed by the undo
[root@k8s-121 k8s-test]# kubectl get po -n test
NAME READY STATUS RESTARTS AGE
nginx-deploy-7b9cffd885-d7mq4 1/1 Running 0 29m
nginx-deploy-7b9cffd885-nmdb9 1/1 Running 0 29m
nginx-deploy-7b9cffd885-qdngc 1/1 Running 0 22m
# the failed rs still exists
[root@k8s-121 k8s-test]# kubectl get rs -n test --show-labels
NAME DESIRED CURRENT READY AGE LABELS
nginx-deploy-67c9bd59df 0 0 0 3h12m app=nginx-deploy,pod-template-hash=67c9bd59df
nginx-deploy-69f6b9c5dc 0 0 0 20m app=nginx-deploy,pod-template-hash=69f6b9c5dc
nginx-deploy-7b9cffd885 3 3 3 3h31m app=nginx-deploy,pod-template-hash=7b9cffd885
nginx-deploy-86b8db4fd8 0 0 0 59m app=nginx-deploy,pod-template-hash=86b8db4fd8
nginx-test-86788dc8c7 1 1 1 558d app=nginx-test,pod-template-hash=86788dc8c7
Pause and resume
# pause
kubectl rollout pause deploy nginx-deploy -n test
# resume
kubectl rollout resume deploy nginx-deploy -n test
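Pausing is useful for batching several changes into a single rollout — a sketch using standard kubectl set commands:
kubectl rollout pause deploy nginx-deploy -n test
kubectl set image deploy nginx-deploy nginx=nginx:1.23.4 -n test # no rollout happens yet
kubectl set resources deploy nginx-deploy -c nginx --limits=cpu=200m,memory=128Mi -n test
kubectl rollout resume deploy nginx-deploy -n test # one rolling update applies both changes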
Defining a stateful service (StatefulSet)
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
namespace: test
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: test
spec:
serviceName: "nginx"
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
        ports: # ports to expose inside the container
        - containerPort: 80 # the specific port to expose
          name: web # name of this port entry
kubectl replace -f web.yaml # replace takes only -f; use kubectl create -f web.yaml the first time
# test whether the sts is reachable
[root@k8s-121 k8s-test]# kubectl get sts -n test
NAME READY AGE
web 2/2 14m
[root@k8s-121 k8s-test]# kubectl get pod -n test
NAME READY STATUS RESTARTS AGE
nginx-test-86788dc8c7-8srzf 1/1 Running 22 557d
web-0 1/1 Running 0 5m44s
web-1 1/1 Running 0 3m31s
# create another pod to access web
kubectl run -it --image=busybox pod-test --restart=Never --rm -- /bin/sh
or kubectl exec -it pod-test-69df5c4d56-h2sdv -- /bin/sh
ping web-0.nginx
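The headless service gives each sts pod a stable DNS entry of the form <pod-name>.<service-name>.<namespace>.svc.cluster.local. The short name web-0.nginx only resolves from a pod in the same namespace; from elsewhere use the full form:
ping web-0.nginx.test.svc.cluster.local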
Scaling up and down
kubectl scale statefulset web --replicas=3 -n test
kubectl patch statefulset web -p '{"spec":{"replicas":2}}'
Image updates
# sts does not support updating the image directly; use patch to achieve it indirectly
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "nginx:1.23.4"}]' -n test
kubectl rollout history sts -n test
kubectl rollout history sts web --revision=0 -n test
Rolling updates
#kubectl edit sts web -n test
...
updateStrategy:
rollingUpdate:
    partition: 0 # enables canary releases: the update only touches pods whose ordinal is >= this value
type: RollingUpdate
...
#kubectl scale statefulset web --replicas=5 -n test
[root@k8s-121 ~]# kubectl get po -n test
NAME READY STATUS RESTARTS AGE
nginx-test-86788dc8c7-8srzf 1/1 Running 23 558d
web-0 1/1 Running 0 41m
web-1 1/1 Running 0 43m
web-2 1/1 Running 0 82s
web-3 1/1 Running 0 45s
web-4 1/1 Running 0 24s
#kubectl describe pod web-4 -n test
Image: nginx:1.23.4
# change partition: 0 to 3
#kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "nginx"}]' -n test
#kubectl describe pod web-4 -n test
Image: nginx
# to widen the update, change partition: 3 back to 0
OnDelete updates
#kubectl edit sts web -n test
...
updateStrategy:
  type: OnDelete # changing the template does not trigger an update; a pod is updated only after it is deleted
...
Cascading and non-cascading deletion
kubectl delete sts web -n test # cascading delete (the pods are deleted too)
kubectl delete sts web --cascade=false # non-cascading delete (the pods are kept); newer kubectl uses --cascade=orphan
DaemonSet
Resource manifest
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
spec:
selector:
matchLabels:
app: logging
template:
metadata:
labels:
app: logging
id: fluentd
name: fluentd
spec:
containers:
- name: fluentd-es
image: agilestacks/fluentd-elasticsearch:v1.3.0
env:
- name: FLUENTD_ARGS
value: -qq
volumeMounts:
- name: containers
mountPath: /var/lib/docker/containers
- name: varlog
mountPath: /varlog
volumes:
- hostPath:
path: /var/lib/docker/containers
name: containers
- hostPath:
path: /var/log/
name: varlog
Creating the DaemonSet and management commands
kubectl create -f fluentd.yaml
[root@k8s-121 k8s-test]# kubectl get daemonset
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
fluentd 2 2 0 2 0 <none> 2m1s
[root@k8s-121 k8s-test]# kubectl get po
NAME READY STATUS RESTARTS AGE
fluentd-n7tdh 0/1 ContainerCreating 0 2m37s
fluentd-zl5hh 0/1 ContainerCreating 0 2m37s
[root@k8s-121 k8s-test]# kubectl get po -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-n7tdh 0/1 ContainerCreating 0 2m56s <none> k8s-122.host.com <none> <none>
fluentd-zl5hh 0/1 ContainerCreating 0 2m56s <none> k8s-121.host.com <none> <none>
[root@k8s-121 k8s-test]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-121.host.com Ready master,node 571d v1.15.5
k8s-122.host.com Ready master,node 571d v1.15.5
[root@k8s-121 k8s-test]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-121.host.com Ready master,node 571d v1.15.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-121.host.com,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/node=
k8s-122.host.com Ready master,node 571d v1.15.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-122.host.com,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/node=
[root@k8s-121 k8s-test]# kubectl label no k8s-122.host.com type=microservices
[root@k8s-121 k8s-test]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
k8s-121.host.com Ready master,node 571d v1.15.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-121.host.com,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/node=
k8s-122.host.com Ready master,node 571d v1.15.5 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s-122.host.com,kubernetes.io/os=linux,node-role.kubernetes.io/master=,node-role.kubernetes.io/node=,type=microservices
[root@k8s-121 k8s-test]# kubectl edit ds fluentd
...
spec:
  nodeSelector: # added
    type: microservices # added
containers:
- env:
- name: FLUENTD_ARGS
value: -qq
...
[root@k8s-121 k8s-test]# kubectl get ds
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
fluentd 1 1 1 1 1 type=microservices 22m
nginx-ds 2 2 2 2 2 <none> 571d
nginx-ds-svc 2 2 2 2 2 <none> 567d
[root@k8s-121 k8s-test]# kubectl get po -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-m5ztw 1/1 Running 0 51s 172.7.122.6 k8s-122.host.com <none> <none>
HPA autoscaling
Horizontal Pod Autoscaling: pods can be scaled up or down automatically based on CPU usage or custom metrics.
Prepare a deploy resource manifest
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: nginx-deploy
name: nginx-deploy
namespace: test
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: nginx-deploy
strategy:
rollingUpdate:
      maxSurge: 25% # during a rolling update, how many pods (count or percentage) may exceed the desired replica count
      maxUnavailable: 25% # maximum proportion of pods that may be unavailable during the update
type: RollingUpdate
template:
metadata:
labels:
app: nginx-deploy
spec:
containers:
- image: nginx:1.23.4
imagePullPolicy: IfNotPresent
name: nginx
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "200m"
memory: "128Mi"
restartPolicy: Always
terminationGracePeriodSeconds: 30
Configure the HPA
[root@k8s-121 k8s-test]# kubectl autoscale deploy nginx-deploy --cpu-percent=20 --min=2 --max=5 -n test
[root@k8s-121 k8s-test]# kubectl top pod -n test
NAME CPU(cores) MEMORY(bytes)
nginx-deploy-57dbd8d4f9-tfrwx 0m 6Mi
nginx-test-86788dc8c7-8srzf 0m 3Mi
# stress test
# prepare a svc resource manifest
apiVersion: v1
kind: Service
metadata:
namespace: test
name: nginx-svc
labels:
app: nginx
spec:
selector:
app: nginx-deploy
ports:
- port: 80
targetPort: 80
name: web
type: NodePort
[root@k8s-121 k8s-test]# kubectl get svc -n test
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-svc02 NodePort 192.168.46.79 <none> 80:4786/TCP 2m22s
[root@k8s-121 k8s-test]# curl 192.168.46.79
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
# the page is reachable
while true; do wget -q -O- http://192.168.46.79 > /dev/null; done
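To watch the autoscaler react to the load (kubectl top working above means metrics-server is available, which the HPA also depends on):
kubectl get hpa -n test -w # TARGETS shows current/target CPU; REPLICAS grows toward max
kubectl get po -n test -w # new pods appear as the deployment scales out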
Service discovery: Service
Service resource manifest
apiVersion: v1
kind: Service # the resource type is Service
metadata:
  namespace: test # the resource's namespace
  name: nginx-svc # the resource's name
  labels:
    app: nginx # labels on the Service itself
spec:
  selector: # selector matching the pods this service proxies
    app: nginx-deploy # every pod carrying this label is reachable through the service
  ports: # port mapping
  - port: 80 # the service's own port, used when accessing via the cluster-internal IP
    targetPort: 80 # the target pod's port
    nodePort: 32000 # explicitly bind this node port
    name: web # name of the port
  type: NodePort # four types: ClusterIP, ExternalName, NodePort, LoadBalancer
# NodePort: opens a port (30000-32767 by default) mapped to the service port; the port is bound directly on every node in the cluster
# it can also expose the service externally, but that is not recommended in production: it is inefficient, and a Service only does layer-4 load balancing; NodePort is normally used for testing
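Inside the cluster the service is also resolvable by DNS as <service-name>.<namespace>.svc.cluster.local — a quick check from any pod:
curl http://nginx-svc.test.svc.cluster.local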
Proxying services outside K8s
How to do it:
- write the Service config without the selector attribute
- create the Endpoints object yourself
apiVersion: v1
kind: Service
metadata:
namespace: test
name: nginx-svc-external
labels:
app: nginx-ex
spec:
ports:
- port: 80
targetPort: 80
name: web
type: ClusterIP
Create the Endpoints
apiVersion: v1
kind: Endpoints
metadata:
labels:
app: nginx-svc-external
name: nginx-svc-external
namespace: test
subsets:
- addresses:
  - ip: <target ip> # target IP address
  ports:
  - name: web # must match the port name configured in the svc
    port: 80
    protocol: TCP
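To confirm the hand-made Endpoints object is wired to the service, standard kubectl is enough:
kubectl get endpoints nginx-svc-external -n test # ENDPOINTS should list <target ip>:80
kubectl describe svc nginx-svc-external -n test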
Reverse-proxying an external domain
apiVersion: v1
kind: Service
metadata:
labels:
app: wolfcode-external-domain
  name: wolfcode-external-domain
spec:
type: ExternalName
externalName: www.wolfcode.cn
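An ExternalName service works purely at the DNS level: resolving its name returns a CNAME to the configured domain. A quick check from a pod in the same namespace:
nslookup wolfcode-external-domain
# the answer is a CNAME pointing at www.wolfcode.cn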
Common types
- ClusterIP: only usable inside the cluster; the default when no type is configured
- ExternalName: returns the configured CNAME alias; can point at a domain name
- NodePort: opens a port (30000-32767 by default) mapped to the service port; the port is bound directly on every node in the cluster
- LoadBalancer: uses the load balancer provided by a cloud vendor
Service discovery: Ingress
Download Helm
# download and install helm
[root@k8s-121 ~]# wget https://get.helm.sh/helm-v3.2.3-linux-amd64.tar.gz
[root@k8s-121 linux-amd64]# tar -zxvf helm-v3.2.3-linux-amd64.tar.gz -C /opt/
[root@k8s-121 linux-amd64]# mv /opt/linux-amd64/helm /usr/local/bin/helm
# add the repo
[root@k8s-121 /]# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
"ingress-nginx" has been added to your repositories
# list repos
[root@k8s-121 /]# helm repo list
NAME URL
ingress-nginx https://kubernetes.github.io/ingress-nginx
# search for the chart
[root@k8s-121 /]# helm search repo ingress-nginx
NAME CHART VERSION APP VERSION DESCRIPTION
ingress-nginx/ingress-nginx 4.6.1 1.7.1 Ingress controller for Kubernetes using NGINX a...
# pull the chart
[root@k8s-121 /]# helm pull ingress-nginx/ingress-nginx
[root@k8s-121 opt]# ls -l |grep ingress
-rw-r--r-- 1 root root 45393 May 25 15:40 ingress-nginx-4.6.1.tgz
Modify the configuration
[root@k8s-121 ingress-nginx]# vim values.yaml
...
controller:
name: controller
image:
## Keep false as default for now!
chroot: false
    registry: registry.cn-hangzhou.aliyuncs.com # changed
    image: google_containers/nginx-ingress-controller # changed
## for backwards compatibility consider setting the full image url via the repository value below
## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
## repository:
tag: "v1.7.1"
#digest: sha256:7244b95ea47bddcb8267c1e625fb163fc183ef55448855e3ac52a7b260a60407
#digestChroot: sha256:e35d5ab487861b9d419c570e3530589229224a0762c7b4d2e2222434abb8d988
pullPolicy: IfNotPresent
# www-data -> uid 101
runAsUser: 101
allowPrivilegeEscalation: true
...
# search for kube-webhook; the original block is
image:
registry: registry.k8s.io
image: ingress-nginx/kube-webhook-certgen
tag: v20230312-helm-chart-4.5.2-28-g66a760794
digest: sha256:01d181618f270f2a96c04006f33b2699ad3ccb02da48d0f89b22abce084b292f
pullPolicy: IfNotPresent
# change it to
image:
registry: registry.cn-hangzhou.aliyuncs.com
image: google_containers/kube-webhook-certgen
tag: "v1.7.1"
#digest: sha256:01d181618f270f2a96c04006f33b2699ad3ccb02da48d0f89b22abce084b292f
#pullPolicy: IfNotPresent
# search for DaemonSet and find
nodeSelector:
kubernetes.io/os: linux
# change it to
nodeSelector:
kubernetes.io/os: linux
ingress: "true"
# search for hostNetwork
change it to true
# search for dnsPolicy
change it to ClusterFirstWithHostNet
# search for LoadBalancer
change it to ClusterIP
# search for admissionWebhooks
set admissionWebhooks.enabled to false
Install ingress-nginx
# create a namespace
kubectl create ns ingress-nginx
# label the nodes that should run ingress
kubectl label node k8s-121.host.com ingress=true
kubectl label node k8s-122.host.com ingress=true
# install ingress-nginx
[root@k8s-121 ingress-nginx]# helm install ingress-nginx -n ingress-nginx .
Error: Kubernetes cluster unreachable
[root@k8s-121 ingress-nginx]# export KUBERNETES_MASTER=http://127.0.0.1:8080
[root@k8s-121 ingress-nginx]# helm install ingress-nginx -n ingress-nginx .
Error: chart requires kubeVersion: >=1.20.0-0 which is incompatible with Kubernetes v1.15.5
Using ingress-nginx
vim ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-nginx
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules: # ingress rules; more than one can be configured
  - host: k8s.wolfcode.cn # host to match; the wildcard * is allowed
    http:
      paths: # equivalent to nginx location blocks; more than one can be configured
      - backend:
          service:
            name: nginx-svc # which service to proxy to
            port:
              number: 80 # the service's port
        path: / # like a path prefix match in an nginx location
        pathType: Prefix
# pathType controls how path is matched: ImplementationSpecific requires an IngressClass and follows its rules; Exact requires the URL to match path exactly; Prefix matches by prefix, split on /
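Because hostNetwork was set to true in values.yaml, the controller listens directly on the nodes labeled ingress=true, so the rule can be tested without DNS; <node-ip> below is a placeholder for such a node:
curl -H "Host: k8s.wolfcode.cn" http://<node-ip>/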
Configuration and storage
Configuration management
ConfigMap
Creating a ConfigMap
[root@k8s-121 k8s-test]# kubectl create configmap -h # show help for creating a configmap
[root@k8s-121 k8s-test]# mkdir config_dir
[root@k8s-121 config_dir]# kubectl create configmap my-config --from-file=/opt/k8s-test/config_dir
configmap/my-config created
[root@k8s-121 config_dir]# kubectl get cm
NAME DATA AGE
my-config 2 6s
[root@k8s-121 config_dir]# kubectl describe cm my-config
[root@k8s-121 config_dir]# kubectl create configmap my-key-value-config --from-literal=username=root --from-literal=password=test1234
[root@k8s-121 config_dir]# kubectl create configmap test-env-config --from-literal=JAVA_OPTS_TEST='-Xms128m -Xmx128m' --from-literal=APP_NAME=springboot-env-test
Using a ConfigMap
Create a pod
apiVersion: v1
kind: Pod
metadata:
name: cm-test
spec:
containers:
- name: cm-test
image: alpine
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c","env;sleep 3600"]
env:
- name: JAVA_VM_OPTS
valueFrom:
configMapKeyRef:
          name: test-env-config # name of the configmap
          key: JAVA_OPTS_TEST # read this key's value from that configmap and assign it to the local env var JAVA_VM_OPTS
- name: APP
valueFrom:
configMapKeyRef:
name: test-env-config
key: APP_NAME
[root@k8s-121 k8s-test]# kubectl create -f envpod.yaml
[root@k8s-121 k8s-test]# kubectl exec -it cm-test -- /bin/sh
[root@k8s-121 k8s-test]# kubectl logs -f cm-test
[root@k8s-121 k8s-test]# docker exec -it f2380c71ee55 /bin/sh
Using a cm created with --from-file
apiVersion: v1
kind: Pod
metadata:
name: cm-configfile-test
spec:
containers:
- name: cm-configfile-test
image: alpine
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c","env;sleep 3600"]
    volumeMounts: # mount volumes into the container
    - name: db-config
      mountPath: "/usr/local/mysql/conf" # directory where the volume is mounted
      readOnly: true # read-only or not; defaults to false
  volumes: # volume definitions; configMap and secret are two of the forms
  - name: db-config # volume name, anything you like
    configMap: # the volume type is configMap
      name: my-config # must match the name of the configMap to load
      items: # maps keys of the configMap; if omitted, every key becomes a file of the same name
      - key: "db.properties" # key in the configMap
        path: "db.properties" # file name the key's value is written to
Using Secrets
Create
[root@k8s-121 k8s-test]# kubectl create secret generic orig-secret --from-literal=username=admin --from-literal=password=TEst\!234
[root@k8s-121 k8s-test]# kubectl create secret generic orig-secret --from-literal=username=admin --from-literal=password='TEst!234' # values with special characters must be single-quoted
[root@k8s-121 k8s-test]# kubectl describe secret orig-secret
[root@k8s-121 k8s-test]#kubectl create secret docker-registry my-docker-sec --docker-username=user --docker-password=password --docker-email=email
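Secret values are stored base64-encoded; to read one back, a jsonpath query plus base64 -d works:
kubectl get secret orig-secret -o jsonpath='{.data.password}' | base64 -d
The docker-registry secret created above can then be referenced by a pod through imagePullSecrets: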
apiVersion: v1
kind: Pod
metadata:
name: cm-configfile-test
spec:
  imagePullSecrets: # use this secret to pull images from the private registry
- name: my-docker-sec
containers:
- name: cm-configfile-test
image: xxx.xxx.xx.xx/xxx/xxx:v1.2.3
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c","env;sleep 3600"]
    volumeMounts: # mount volumes into the container
    - name: db-config
      mountPath: "/usr/local/mysql/conf" # directory where the volume is mounted
      readOnly: true # read-only or not; defaults to false
  volumes: # volume definitions; configMap and secret are two of the forms
  - name: db-config # volume name, anything you like
    configMap: # the volume type is configMap
      name: my-config # must match the name of the configMap to load
      items: # maps keys of the configMap; if omitted, every key becomes a file of the same name
      - key: "db.properties" # key in the configMap
        path: "db.properties" # file name the key's value is written to
Using subPath
Used to mount a single file or subdirectory from a volume, instead of shadowing the whole target directory with the volume's contents.
apiVersion: v1
kind: Pod
metadata:
name: cm-configfile-test
spec:
containers:
- name: cm-configfile-test
image: alpine
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c","env;sleep 3600"]
    volumeMounts: # mount volumes into the container
    - name: nginx-conf
      mountPath: /etc/nginx/nginx.conf # mounted as a single file, not a directory
      subPath: etc/nginx/nginx.conf # path of the file within the volume
  volumes: # volume definitions; configMap and secret are two of the forms
  - name: nginx-conf # volume name, anything you like
    configMap: # the volume type is configMap
      name: nginx-conf-cm # must match the name of the configMap to load
      items: # maps keys of the configMap; if omitted, every key becomes a file of the same name
      - key: nginx.conf # key in the configMap
        path: etc/nginx/nginx.conf # file path the key's value is written to inside the volume
Hot-reloading configuration
- modify with kubectl edit
- modify with kubectl apply -f
# generate a cm yaml manifest
kubectl create cm cm-test --from-file=/opt/k8s-test/config_dir --dry-run -o yaml
# update the cm in place
kubectl create cm cm-test --from-file=/opt/k8s-test/config_dir --dry-run -o yaml | kubectl replace -f -
Immutable Secrets and ConfigMaps
Some sensitive configuration must not be changed once in production; setting immutable: true on the configMap (or Secret) forbids further modification.
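A minimal sketch of the field (note: immutable was introduced in Kubernetes 1.18, so it is not available on the v1.15 cluster used in the examples above; the name immutable-config is illustrative):
apiVersion: v1
kind: ConfigMap
metadata:
  name: immutable-config
data:
  db.host: "127.0.0.1"
immutable: true # once set, data can no longer be changed; delete and recreate to update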
Persistent storage management
Volumes
Mounts a file or directory from the node into the pod. The directory then behaves as persistent storage: even if the pod is deleted and restarted, it can be remounted, and the files under it are not lost.
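This describes the hostPath volume type — a minimal sketch (the pod name and paths are illustrative):
apiVersion: v1
kind: Pod
metadata:
  name: hostpath-test
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["/bin/sh","-c","sleep 3600"]
    volumeMounts:
    - name: node-data
      mountPath: /data # path inside the container
  volumes:
  - name: node-data
    hostPath:
      path: /tmp/hostpath-data # path on the node; survives pod deletion
      type: DirectoryOrCreate # create the directory on the node if it does not exist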