
Installing and Deploying Velero, Resource Manifests, and TLS Authentication with Secrets - Week 5


Install MinIO

mkdir -p /data/minio

docker pull minio/minio:RELEASE.2022-04-12T06-55-35Z

docker run --name minio -p 9000:9000 -p 9999:9999 -d --restart=always -e "MINIO_ROOT_USER=admin" -e "MINIO_ROOT_PASSWORD=12345678" -v /data/minio/data:/data minio/minio:RELEASE.2022-04-12T06-55-35Z server /data --console-address '0.0.0.0:9999'

Log in to the MinIO console and create a bucket named velerodata.
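
The bucket can also be created from the command line with the MinIO client mc (assuming mc is installed; the endpoint 192.168.44.10:9000 matches the s3Url used in the velero install step below):

mc alias set minio http://192.168.44.10:9000 admin 12345678
mc mb minio/velerodata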

Deploy Velero

On master01:

cd /usr/local/src/
wget https://github.com/vmware-tanzu/velero/releases/download/v1.8.1/velero-v1.8.1-linux-amd64.tar.gz
tar xvf velero-v1.8.1-linux-amd64.tar.gz
cp velero-v1.8.1-linux-amd64/velero  /usr/local/bin/
velero  --help

Configure the Velero authentication environment:

mkdir  /data/velero -p
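
The velero-auth.txt file passed to velero install below holds the object-storage credentials in AWS credentials-file format; for the MinIO root user created above it looks like this:

vim velero-auth.txt
[default]
aws_access_key_id = admin
aws_secret_access_key = 12345678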

Prepare the user CSR file:

vim awsuser-csr.json
{
  "CN": "awsuser",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}


Prepare the certificate-signing environment:

root@k8s-master1:/data/velero# apt install golang-cfssl

Alternatively, download the cfssl release binaries:

wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
root@k8s-master1:/data/velero# mv cfssl-certinfo_1.6.1_linux_amd64 cfssl-certinfo
root@k8s-master1:/data/velero# mv cfssl_1.6.1_linux_amd64 cfssl
root@k8s-master1:/data/velero# mv cfssljson_1.6.1_linux_amd64 cfssljson
root@k8s-master1:/data/velero# cp cfssl-certinfo cfssl cfssljson /usr/local/bin/
root@k8s-master1:/data/velero# chmod a+x /usr/local/bin/cfssl*

Sign the certificate:

/usr/local/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes ./awsuser-csr.json | cfssljson -bare awsuser

Verify the certificate files:

root@k8s-master1:/data/velero# ll
total 20
drwxr-xr-x 2 root root  114 Dec 14 19:55 ./
drwxr-xr-x 3 root root   20 Dec 14 17:18 ../
-rw-r--r-- 1 root root  220 Dec 14 19:11 awsuser-csr.json
-rw------- 1 root root 1675 Dec 14 19:55 awsuser-key.pem
-rw-r--r-- 1 root root  997 Dec 14 19:55 awsuser.csr
-rw-r--r-- 1 root root 1387 Dec 14 19:55 awsuser.pem
-rw-r--r-- 1 root root   69 Dec 14 19:10 velero-auth.txt

Distribute the certificates to the kube-apiserver certificate path:

cp awsuser-key.pem /etc/kubernetes/ssl/
cp awsuser.pem /etc/kubernetes/ssl/

Generate the cluster authentication kubeconfig file:

export KUBE_APISERVER="https://192.168.44.12:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=./awsuser.kubeconfig

Set the client certificate credentials:

kubectl config set-credentials awsuser \
--client-certificate=/etc/kubernetes/ssl/awsuser.pem \
--client-key=/etc/kubernetes/ssl/awsuser-key.pem \
--embed-certs=true \
--kubeconfig=./awsuser.kubeconfig

Set the context parameters:

kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=awsuser \
--namespace=velero-system \
--kubeconfig=./awsuser.kubeconfig

Set the default context:

kubectl config use-context kubernetes --kubeconfig=awsuser.kubeconfig
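
The generated kubeconfig can be inspected to confirm the cluster, user, and context entries:

kubectl config view --kubeconfig=./awsuser.kubeconfig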

Create the awsuser account (cluster role binding) in the k8s cluster:

kubectl create clusterrolebinding awsuser --clusterrole=cluster-admin --user=awsuser

Create the namespace:

kubectl create ns velero-system

Run the installation:

velero --kubeconfig ./awsuser.kubeconfig \
  install \
  --provider aws \
  --plugins velero/velero-plugin-for-aws:v1.3.1 \
  --bucket velerodata \
  --secret-file ./velero-auth.txt \
  --use-volume-snapshots=false \
  --namespace velero-system \
  --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://192.168.44.10:9000

kubectl logs deployment/velero -n velero-system
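
Once the velero pod is running, a backup can be created and restored from the CLI; a minimal example, assuming a namespace named myserver exists:

velero backup create myserver-backup --include-namespaces myserver --kubeconfig ./awsuser.kubeconfig --namespace velero-system
velero backup get --kubeconfig ./awsuser.kubeconfig --namespace velero-system
velero restore create --from-backup myserver-backup --kubeconfig ./awsuser.kubeconfig --namespace velero-system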

Writing resource manifests

deployment

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-ng20
spec:
  replicas: 2
  selector:
    matchLabels:
      app: ng-deploy-20
  template:
    metadata:
      labels:
        app: ng-deploy-20
    spec:
      containers:
      - name: ng-deploy-20
        image: nginx:1.20.2
        ports:
        - containerPort: 80
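
Apply the manifest (filename assumed) and inspect the ReplicaSet the Deployment creates:

kubectl apply -f nginx-deployment.yaml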

kubectl get replicaset

Rollback

kubectl rollout undo deployment/nginx-deployment-ng20
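
The revision history that undo rolls back through can be listed beforehand:

kubectl rollout history deployment/nginx-deployment-ng20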

Create a Service

Used for load balancing across pods.

apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-20
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: ng-deploy-20

svc-nodeport

apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-20-nodeport
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-20
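
The NodePort can be tested against any node IP (node IP assumed here):

curl http://192.168.44.11:30012/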

configmap

A ConfigMap decouples configuration from the container image: the configuration data is stored in a ConfigMap object and then mounted into the pod as a volume, importing the configuration into the pod.

Used to define configuration files inside a pod.

Notes:

  • A ConfigMap must be created before any pod that uses it (see the imperative example after this list).

  • A pod can only use ConfigMaps located in the same namespace; that is, a ConfigMap cannot be used across namespaces.

  • Typically used for configuration that does not need secure encryption.

  • A ConfigMap usually holds less than 1MB of configuration data.
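
Besides writing a manifest, a ConfigMap can be created imperatively before the pods that need it; the names and values here are illustrative:

kubectl create configmap app-config --from-literal=loglevel=info
kubectl get configmap app-config -o yaml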

Using a ConfigMap for the nginx configuration file

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       index        index.html index.php index.htm;

       location / {
           root /data/nginx/html;
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }


---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-config-80
  template:
    metadata:
      labels:
        app: ng-deploy-config-80
    spec:
      containers:
      - name: ng-deploy-config-80
        image: nginx:1.20.0
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/nginx/html
          name: nginx-static-dir
        - name: nginx-config
          mountPath:  /etc/nginx/conf.d
      volumes:
      - name: nginx-static-dir
        hostPath:
          path: /data/nginx
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-config-80
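
To confirm the ConfigMap was rendered into the container as mysite.conf:

kubectl exec deploy/nginx-deployment -- cat /etc/nginx/conf.d/mysite.conf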

Using a ConfigMap for environment variables

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config-env
data:
  username: "user1"
  password: "12345678" 

---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-config-80
  template:
    metadata:
      labels:
        app: ng-deploy-config-80
    spec:
      containers:
      - name: ng-deploy-config-80
        image: nginx:1.20.0
        env:
        - name: MY_USERNAME
          valueFrom:
            configMapKeyRef:
              name: nginx-config-env
              key: username
        - name: MY_PASSWORD
          valueFrom:
            configMapKeyRef:
              name: nginx-config-env
              key: password
        ports:
        - containerPort: 80
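
The injected variables can be checked inside a running pod:

kubectl exec deploy/nginx-deployment -- env | grep MY_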

Volumes

emptyDir holds temporary data; the volume is created with the pod and deleted when the pod is removed from the node.

emptydir

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.17.2
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir: {}
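
On the node, the emptyDir data lives under the kubelet pod directory and can be located with (default kubelet path assumed):

find /var/lib/kubelet/pods/ -type d -name cache-volume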

hostpath

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /data/kubernetes

Using NFS for pod data persistence

nfs

Install nfs-server:

apt install nfs-server

Edit the exports file:

vim /etc/exports
/data/k8sdata *(rw,no_root_squash)

Restart nfs-server:

systemctl restart nfs-server
# verify
showmount -e 192.168.44.11

Mount the NFS export in a pod:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.17.2
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.44.11
          path: /data/k8sdata
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
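
To verify the mount end to end, write a file into the export on the NFS server and fetch it through the NodePort (node IP assumed):

echo "nfs test" > /data/k8sdata/index.html
curl http://192.168.44.11:30016/mysite/index.html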

Mount multiple NFS directories:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-sitetest
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-81
  template:
    metadata:
      labels:
        app: ng-deploy-81
    spec:
      containers:
      - name: ng-deploy-81
        image: nginx:1.17.2
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/pool1
          name: my-nfs-volume-pool1
        - mountPath: /usr/share/nginx/html/pool2
          name: my-nfs-volume-pool2
      volumes:
      - name: my-nfs-volume-pool1
        nfs:
          server: 192.168.44.11
          path: /data/k8sdata/pool1
      - name: my-nfs-volume-pool2
        nfs:
          server: 192.168.44.11
          path: /data/k8sdata/pool2
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-81
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30017
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-81

TLS authentication for nginx based on a Secret

Create the certificates:

openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 3560 -nodes -subj '/CN=www.ca.com'

openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=www.jack.com'

openssl x509 -req -sha256 -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt


Create the TLS secret:

kubectl create secret tls jack-tls-key --cert=./server.crt --key=./server.key
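
The resulting secret is of type kubernetes.io/tls and carries the keys tls.crt and tls.key, which is why the nginx configuration below references those filenames:

kubectl describe secret jack-tls-key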

Configure nginx with certificates from the TLS secret:

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       listen 443 ssl;
       ssl_certificate /etc/nginx/conf.d/certs/tls.crt;
       ssl_certificate_key /etc/nginx/conf.d/certs/tls.key;

       location / {
           root /usr/share/nginx/html; 
           index index.html;
           if ($scheme = http ){  # without this condition the rewrite would loop forever
              rewrite / https://www.mysite.com permanent;
           }  

           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jack-myapp-frontend-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: nginx:1.20.2-alpine
        ports:
          - containerPort: 80
        volumeMounts:
          - name: nginx-config
            mountPath: /etc/nginx/conf.d
          - name: jack-tls-key
            mountPath: /etc/nginx/conf.d/certs
      volumes:
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf
      - name: jack-tls-key
        secret:
          secretName: jack-tls-key 
---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30020
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    nodePort: 30021
    protocol: TCP
  selector:
    app: myserver-myapp-frontend
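
Once applied, HTTPS can be tested through the NodePort; -k skips verification of the self-signed chain (node IP assumed):

curl -k https://192.168.44.11:30021/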

Pull authentication for private registry images

Create a secret holding the docker registry credentials:

kubectl create secret docker-registry mypullsecret --docker-server=harbor.jackedu.cn --docker-username=admin --docker-password=123456
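
The stored credentials can be verified by decoding the secret's .dockerconfigjson payload:

kubectl get secret mypullsecret -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d

The deployment then references the secret through imagePullSecrets: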

#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myserver-myapp-frontend-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
      - name: myserver-myapp-frontend
        image: harbor.jackedu.cn/secert/nginx:1.16.1-alpine-perl 
        ports:
          - containerPort: 80
      imagePullSecrets:
        - name: mypullsecret

---
apiVersion: v1
kind: Service
metadata:
  name: myserver-myapp-frontend
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30022
    protocol: TCP
  type: NodePort
  selector:
    app: myserver-myapp-frontend 

Dynamic PV provisioning based on NFS

Create the RBAC objects:

apiVersion: v1
kind: Namespace
metadata:
  name: nfs
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
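
These notes omit the provisioner Deployment itself; a minimal sketch based on the upstream nfs-subdir-external-provisioner example, assuming NFS server 192.168.44.11 and the /data/volumes export added below:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  namespace: nfs
spec:
  replicas: 1
  strategy:
    type: Recreate # run at most one instance during updates
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner # must match the StorageClass provisioner field
            - name: NFS_SERVER
              value: 192.168.44.11
            - name: NFS_PATH
              value: /data/volumes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.44.11
            path: /data/volumes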

Create the StorageClass:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner # or choose another name; must match the provisioner deployment's PROVISIONER_NAME env var
reclaimPolicy: Retain # PV reclaim policy; the default Delete removes the data on the NFS server as soon as the PV is deleted
mountOptions:
  #- vers=4.1 # some of these parameters misbehave under containerd
  #- noresvport # tell the NFS client to use a new TCP source port when re-establishing the connection
  - noatime # do not update the inode access time on reads; improves performance under high concurrency
parameters:
  #mountOptions: "vers=4.1,noresvport,noatime"
  archiveOnDelete: "true"  # archive the data when the claim is deleted; false (the default) discards it

Add the /data/volumes directory on the NFS server:

root@k8s-harbor:/data# cat /etc/exports 
# /etc/exports: the access control list for filesystems which may be exported
#		to NFS clients.  See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#
/data/k8sdata *(rw,no_root_squash)
/data/volumes *(rw,no_root_squash)

Reload nfs-server:

systemctl reload nfs-server
showmount -e
Export list for k8s-harbor:
/data/volumes *
/data/k8sdata *

Create a PVC from the StorageClass:

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myserver-myapp-dynamic-pvc
  namespace: myserver
spec:
  storageClassName: managed-nfs-storage # name of the StorageClass to use
  accessModes:
    - ReadWriteMany # access mode
  resources:
    requests:
      storage: 500Mi # requested size
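
After applying, the claim should show Bound and a dynamically provisioned PV should appear:

kubectl get pvc -n myserver
kubectl get pv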

Create a pod through the PVC:

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-myapp
  name: myserver-myapp-deployment-name
  namespace: myserver # must match the namespace of the PVC referenced below
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: myserver-myapp-frontend
  template:
    metadata:
      labels:
        app: myserver-myapp-frontend
    spec:
      containers:
        - name: myserver-myapp-container
          image: nginx:1.20.0 
          #imagePullPolicy: Always
          volumeMounts:
          - mountPath: "/usr/share/nginx/html/statics"
            name: statics-datadir
      volumes:
        - name: statics-datadir
          persistentVolumeClaim:
            claimName: myserver-myapp-dynamic-pvc 

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-myapp-service
  name: myserver-myapp-service-name
  namespace: myserver
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080
  selector:
    app: myserver-myapp-frontend

Images can be placed directly on the NFS server and are then served by the pod.
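
With this provisioner, each PVC gets its own subdirectory under /data/volumes named ${namespace}-${pvcName}-${pvName}; a file dropped there is served immediately (subdirectory glob and node IP assumed):

cp test.jpg /data/volumes/myserver-myserver-myapp-dynamic-pvc-pvc-*/
curl http://192.168.44.11:30080/statics/test.jpg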
