debian 常用配置
系统初始化
仓库配置及常用软件安装
# Back up the APT sources list, then point both the main and the
# security repositories at the USTC mirror.
cp /etc/apt/sources.list /etc/apt/sources.list.bak
sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
sed -i 's/security.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
# Refresh indexes, upgrade, and install commonly used tools.
apt update && apt upgrade && apt install -y bash-completion curl wget vim git man lsb-release sshpass lvm2 ipvsadm
# Configure vim: seed ~/.vimrc from the distribution defaults, disable
# mouse capture (so terminal selection/copy works), and enable paste mode.
if [ ! -f ~/.vimrc ]; then
  # Guard the copy: vim may not be installed, in which case the glob
  # does not match and the original unguarded cp would error out.
  for defaults in /usr/share/vim/vim*/defaults.vim; do
    [ -e "$defaults" ] && cp "$defaults" ~/.vimrc
    break
  done
fi
# Idiomatic form: test grep's status directly with -q instead of
# running it noisily and inspecting $? afterwards.
if grep -q 'set mouse=a' ~/.vimrc; then
  sed -i 's/set mouse=a/set mouse-=a/' ~/.vimrc
fi
if ! grep -q 'set paste' ~/.vimrc; then
  echo 'set paste' >> ~/.vimrc
fi
# Enable bash completion for all login shells by sourcing
# /etc/bash_completion from /etc/profile.
cat >> /etc/profile <<EOF
if [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
EOF
source /etc/profile
系统常用配置
# Timezone
timedatectl set-timezone Asia/Shanghai
# Locale configuration.
# Fixed: "language-pack-zh-hans" is an Ubuntu package and does not exist
# in the Debian archive; on Debian the locale data ships in "locales"
# and individual locales are enabled via dpkg-reconfigure below.
apt -y install locales
localectl list-locales
dpkg-reconfigure locales
# Fixed: use the canonical locale name form zh_CN.UTF-8.
localectl set-locale LANG=zh_CN.UTF-8
# Static IPv4 configuration for the primary NIC (ens33).
cp /etc/network/interfaces /etc/network/interfaces.bak
cat > /etc/network/interfaces <<EOF
source /etc/network/interfaces.d/*

# Loopback
auto lo
iface lo inet loopback

# Primary NIC: static address.
# NOTE: "auto" and "allow-hotplug" should not both be declared for the
# same interface (the original listed both); "auto" alone is kept here.
auto ens33
iface ens33 inet static
address 10.1.1.50
netmask 255.255.255.0
gateway 10.1.1.2
network 10.1.1.0
broadcast 10.1.1.255
EOF
# DNS — use the AliDNS public resolvers.
cat > /etc/resolv.conf <<EOF
nameserver 223.5.5.5
nameserver 223.6.6.6
EOF
# Hostname setup.
# NOTE(review): "export HOSTNAME=hostname" assigns the literal string
# "hostname" — it reads like a placeholder; substitute the real machine
# name before running.
export HOSTNAME=hostname
# Quote expansions so an unexpected value cannot word-split.
hostnamectl set-hostname "$HOSTNAME"
echo "127.0.1.1 $HOSTNAME" >> /etc/hosts
# SSH key generation (PEM format, empty passphrase).
# ssh-keygen -t rsa -b 2048 -N '' -f /root/.ssh/id_rsa -q > /dev/null
# Ensure the .ssh directory exists with correct permissions first —
# ssh-keygen does not create it.
mkdir -p /root/.ssh && chmod 700 /root/.ssh
ssh-keygen -t rsa -b 2048 -N '' -m PEM -f /root/.ssh/id_rsa -C "xwjh@outlook.com" -q
cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
# sshd refuses authorized_keys with lax permissions in some configurations.
chmod 600 /root/.ssh/authorized_keys
ssl 相关
# Generate an RSA private key
openssl genrsa -out rsa_private_key.pem 2048
# Derive the RSA public key from the private key
openssl rsa -in rsa_private_key.pem -pubout -out rsa_public_key.pem
## Alternatively, derive the public key with ssh-keygen
# ssh-keygen -f rsa_private_key.pem -e -m pkcs8 > rsa_public_key.pem
# Convert the RSA private key to PKCS#8 (Java requires PKCS#8-encoded keys)
openssl pkcs8 -topk8 -inform PEM -in rsa_private_key.pem -outform PEM -nocrypt -out pkcs8_private_key.pem
# Derive an SSH-format public key from the RSA private key
ssh-keygen -y -f rsa_private_key.pem > sshPubkey.pub
# Convert a PEM RSA public key to SSH public-key format
ssh-keygen -f rsa_public_key.pem -i -mPKCS8 > rsa_public_key.pub
# Convert an (OpenSSH-format) private key to PEM RSA in place
ssh-keygen -p -m pem -f id_rsa
常用软件快速安装
docker 安装
# Install docker via the convenience script (DaoCloud mirror).
curl -fsSL https://get.daocloud.io/docker | bash -s docker --mirror Aliyun
# or the upstream script
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
# Configure a registry mirror
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
# Or write the daemon configuration directly: registry mirrors, systemd
# cgroup driver, nofile ulimits, overlay2 storage, and log rotation.
# (Comments must stay outside the heredoc — its body becomes JSON.)
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://registry.docker-cn.com",
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com"
],
"default-ulimits": {
"nofile": {
"Hard": 64000,
"Name": "nofile",
"Soft": 64000
}
},
"max-concurrent-downloads": 10,
"log-driver": "json-file",
"storage-driver": "overlay2",
"log-level": "warn",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"data-root": "/var/lib/docker"
}
EOF
[docker配置参考](https://docs.docker.com/engine/reference/commandline/dockerd/)
# Restart docker
systemctl restart docker.service
# Fix `docker info` "WARNING: No swap limit support"; takes effect after
# a reboot. NOTE(review): run update-grub afterwards so the kernel
# command line actually changes — confirm for your grub setup.
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' /etc/default/grub
kubernetes 安装
kubeadm 方式安装
# Disable swap (required by kubelet) and keep it off across reboots.
swapoff -a && sysctl -w vm.swappiness=0
sed -i 's/.*swap.*/# &/' /etc/fstab
# Official install method (needs network access)
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
apt install -y apt-transport-https ca-certificates curl
# Add the Kubernetes APT repository — pick ONE of the three sources below.
curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
# Aliyun mirror
curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
# Tencent Cloud mirror
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://mirrors.cloud.tencent.com/kubernetes/apt/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt install -y kubelet kubeadm kubectl
# Install through an HTTP proxy instead
apt -o Acquire::http::proxy="http://10.1.1.1:7890/" install -y kubelet kubeadm kubectl
# [optional] Hold the packages so apt upgrade does not move them
apt-mark hold kubelet kubeadm kubectl
docker镜像拉取代理
# Route docker daemon image pulls through an HTTP(S) proxy via a
# systemd drop-in, then restart the daemon.
mkdir -p /etc/systemd/system/docker.service.d
cat >/etc/systemd/system/docker.service.d/http-proxy.conf <<EOF
[Service]
Environment="HTTP_PROXY=http://10.1.1.1:7890" "HTTPS_PROXY=http://10.1.1.1:7890" "NO_PROXY=localhost,127.0.0.1,docker.com"
EOF
systemctl daemon-reload
systemctl restart docker
镜像下载
# Show the component image versions for this kubeadm release
kubeadm config images list
# Pull all images
kubeadm config images pull
# Pull images for a specific kubernetes version
kubeadm config images pull --kubernetes-version=v1.23.3
proxy安装
docker run -d --name=k8s-haproxy --net=host -v /etc/haproxy:/etc/haproxy:ro haproxy:2.5.4-alpine
创建k8s集群
# ipv6 / dual-stack support
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/dual-stack-support/
sysctl -w net.ipv6.conf.all.forwarding=1
# NOTE: add the setting to /etc/sysctl.conf manually to persist it.
# Dual-stack initialization
kubeadm init --pod-network-cidr=10.244.0.0/16,2001:db8:42:0::/56 --service-cidr=10.96.0.0/16,2001:db8:42:1::/112
# Pre-pull images using a kubeadm config file
kubeadm config images pull --config kubeadm.conf
# Cluster initialization
# https://kubernetes.io/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/
export KUBE_PROXY_MODE=ipvs
kubeadm init --apiserver-advertise-address 10.1.1.50 --pod-network-cidr=10.244.0.0/16 | tee kubeadm-init.log
# Other useful kubeadm init options (reference only).
# Fixed: these bare option lines were not commented out and would have
# been executed as commands by a shell.
# --image-repository registry.aliyuncs.com/google_containers
# --control-plane-endpoint test.com
# --service-cidr 10.96.0.0/16        # default: "10.96.0.0/12"
# --pod-network-cidr=192.168.0.0/16  # default: 10.244.0.0
# --kubernetes-version v1.23.5
# --node-name k8s-m0
# --upload-certs
# Join a worker node to the cluster
kubeadm join 10.1.1.100:6443 --token me97s3.dactb8c1jqebbqfu --discovery-token-ca-cert-hash sha256:efd377d0d3d35f3b111221ff9afa9275e52298eac00529cce877f8a58bef1a77
# Print a fresh join command on the control plane (tokens expire)
kubeadm token create --print-join-command
kubeadm join 10.1.1.200:8443 --token lhmn7a.43cb8jndawku6mju --discovery-token-ca-cert-hash sha256:97dec9a2a27d472ebf5db46418aa4f504dd226040bb5aa35f46f9ab99bb67589 --certificate-key 5bacaef995897caca3c612f37395274bfdb95a8e95f7ced35c048fbbc4d08c0c
# Join an additional control-plane node (--control-plane)
kubeadm join 10.1.1.200:8443 --token lhmn7a.43cb8jndawku6mju --discovery-token-ca-cert-hash sha256:97dec9a2a27d472ebf5db46418aa4f504dd226040bb5aa35f46f9ab99bb67589 --control-plane --certificate-key 5bacaef995897caca3c612f37395274bfdb95a8e95f7ced35c048fbbc4d08c0c
# Remove the NoSchedule taints so workloads can run on control-plane nodes
kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-
kubectl taint nodes master node-role.kubernetes.io/control-plane:NoSchedule-
# Shell completion for docker and kubectl
apt install bash-completion
mkdir -p /etc/bash_completion.d
curl -L https://raw.githubusercontent.com/docker/cli/master/contrib/completion/bash/docker -o /etc/bash_completion.d/docker
source /etc/bash_completion.d/docker
kubectl completion bash >/etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubectl
cilium 安装
# https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
# Download, checksum-verify, and install the cilium CLI, then install
# cilium into the cluster.
curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-amd64.tar.gz.sha256sum
tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin
rm cilium-linux-amd64.tar.gz{,.sha256sum}
cilium install
cilium status
# Enable Hubble observability and install the hubble CLI the same way.
cilium hubble enable
export HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
curl -L --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-amd64.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
tar xzvfC hubble-linux-amd64.tar.gz /usr/local/bin
rm hubble-linux-amd64.tar.gz{,.sha256sum}
# Forward the Hubble relay port in the background, then inspect flows.
cilium hubble port-forward&
hubble status
hubble observe
cilium connectivity test
flannel 插件安装
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
calico安装
参考
https://projectcalico.docs.tigera.io/networking/ip-autodetection
# Install calico from a pinned manifest…
wget https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml
# …or the latest manifest
wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl apply -f calico.yaml
# Choose ONE IP autodetection method:
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth0
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=kubernetes-internal-ip
# Fixed: the CIDR list must be a single shell word — the original had a
# space after the comma, which split "172.15.0.0/24" into a separate
# argument that kubectl cannot parse.
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD="cidr=192.168.200.0/24,172.15.0.0/24"
calicoctl
# Show node status
calicoctl node status
# System feature check
calicoctl node checksystem
# List nodes
calicoctl get nodes -owide
安装 metrics-server
# Install metrics-server.
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# High-availability variant
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/high-availability.yaml
# https://kubernetes.io/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
# Enable signed kubelet serving certificates:
# find the ConfigMap named kubelet-config-1.23 in the kube-system
# namespace and edit it — under its "kubelet" key there is a
# KubeletConfiguration document; set serverTLSBootstrap: true in it.
# Alternatively: kubectl patch deployment metrics-server -n kube-system --type 'json' -p '[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--kubelet-insecure-tls"}]'
echo 'serverTLSBootstrap: true' >> /var/lib/kubelet/config.yaml
systemctl restart kubelet
kubectl get csr
# Approve the pending certificate signing requests
kubectl certificate approve <CSR-名称>
安装helm
# https://helm.sh/zh/docs/
curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Or download a release tarball and extract the helm binary into /usr/local/bin/
# https://github.com/helm/helm/releases
wget https://get.helm.sh/helm-v3.8.2-linux-amd64.tar.gz
tar -zxvf helm-v3.8.2-linux-amd64.tar.gz -C /usr/local/bin/ linux-amd64/helm --strip-components=1
tar -zxvf helm-v3.10.1-linux-amd64.tar.gz -C /usr/local/bin/ --strip-components=1 --wildcards --no-anchored "helm"
ingress
# https://kubernetes.github.io/ingress-nginx/deploy/
# https://kubernetes.github.io/ingress-nginx/deploy/baremetal/
# Install ingress-nginx from the bare-metal manifest.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.2.0/deploy/static/provider/baremetal/deploy.yaml
# helm install (run inside an unpacked chart directory)
helm install ingress-nginx --namespace ingress-nginx --create-namespace .
# Install/upgrade from the upstream repo as a host-network DaemonSet.
# Fixed: "controller.admissionWebhooks.patch,image.digest" used a comma
# where the values-path separator must be a dot.
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace \
  --set controller.service.type=ClusterIP \
  --set controller.kind=DaemonSet \
  --set controller.hostNetwork=true \
  --set controller.image.digest="" \
  --set controller.admissionWebhooks.patch.image.digest="" \
  --set controller.ingressClassResource.default=true
# Manual alternative: change the Deployment to a DaemonSet,
# set spec.template.spec
# hostNetwork: true
# and delete the ingress-nginx-controller Service.
## Common commands
kubectl create deployment demo --image=httpd --port=80
kubectl expose deployment demo
kubectl create secret tls test.com --key mydomain.key --cert mydomain.crt
kubectl create ingress test.com --class=nginx \
--rule="test.com/*=nginx:80,tls=test.com"
# If you get an error like the following:
# root@k8s-m1:~# kubectl apply -f ingress.yaml
# Error from server (InternalError): error when creating "ingress.yaml": Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://ingress-nginx-controller-admission.default.svc:443/networking/v1/ingresses?timeout=10s": x509: certificate is valid for k8s-m2, kubernetes, kubernetes.default, kubernetes.default.svc, kubernetes.default.svc.cluster.local, test.com, not ingress-nginx-controller-admission.default.svc
# then delete the validating webhook:
kubectl delete -A ValidatingWebhookConfiguration ingress-nginx-admission
安装prometheus
# Install prometheus from the community helm chart with NFS-backed volumes.
helm upgrade -i prometheus prometheus-community/prometheus \
--namespace prometheus \
--set alertmanager.persistentVolume.storageClass="nfs-storage",server.persistentVolume.storageClass="nfs-storage"
# Or deploy the kube-prometheus manifests
git clone https://github.com/prometheus-operator/kube-prometheus.git --depth=1
cd kube-prometheus/
kubectl create -f manifests/setup
kubectl create -f manifests
# Expose the Prometheus and Grafana UIs through ingress.
# Fixed: the rule host was misspelled "prometheus.tset.com" while the
# ingress object itself is named prometheus.test.com.
kubectl create ingress prometheus.test.com \
--namespace monitoring \
--class=nginx \
--rule="prometheus.test.com/*=prometheus-k8s:9090"
kubectl create ingress grafana.test.com \
--namespace monitoring \
--class=nginx \
--rule="grafana.test.com/*=grafana:3000"
常用查询
# Inspect the cluster's API surface.
kubectl get apiservice
kubectl api-versions
kubectl get --raw "/apis/policy/v1/"|jq
kubectl api-resources
kubectl explain <resource>
设置KUBECONFIG
export KUBECONFIG=/etc/kubernetes/admin.conf
使用ipvs
# Switch kube-proxy to IPVS mode, then restart its pod to apply.
kubectl edit configmap kube-proxy -n kube-system
# set mode: "ipvs"   (it was mode: "")
kubectl delete pods -n kube-system $(kubectl get pod -n kube-system -l k8s-app=kube-proxy -o jsonpath='{.items[0].metadata.name}')
安装rook-ceph
# Deploy rook-ceph v1.8.8 from the example manifests.
git clone --single-branch --branch v1.8.8 https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
kubectl create -f cluster.yaml
kubectl create -f csi/rbd/storageclass.yaml
kubectl create -f toolbox.yaml
# Run ceph commands through the toolbox pod.
# NOTE(review): aliases only expand in interactive shells; in a script
# use a function or invoke kubectl exec directly.
alias ceph='kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph'
ceph status
ceph osd status
ceph osd tree
ceph df
# Or open a shell inside the toolbox container
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash
rados df
# Make rook-ceph-block the default storageclass
kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
# Disk initialization — WARNING: destroys all data on /dev/sdb.
dd if=/dev/zero of=/dev/sdb bs=1M count=10
parted -s /dev/sdb mklabel gpt
sgdisk --zap-all /dev/sdb
# Restart the rook-ceph operator (pod name differs per cluster)
kubectl delete pod -n rook-ceph rook-ceph-operator-547d8fbcd-qwfjl
etcd
# Query etcd through the static-pod container on a control-plane node.
kubectl exec -it -n kube-system etcd-k8s-m1 -- etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key member list
#10eddbc8a1847d92, started, k8s-m2, https://10.1.1.51:2380, https://10.1.1.51:2379, false
#31f91dea18d50e46, started, k8s-m1, https://10.1.1.50:2380, https://10.1.1.50:2379, false
#c245cc818221e3e8, started, k8s-m3, https://10.1.1.52:2380, https://10.1.1.52:2379, false
# Convenience alias that runs etcdctl inside the pod against all members.
alias etcdctl='kubectl exec -it -n kube-system etcd-k8s-m1 -- etcdctl --endpoints=https://10.1.1.51:2379,https://10.1.1.50:2379,https://10.1.1.52:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key'
etcdctl endpoint health
etcdctl member list
etcdctl endpoint status --write-out=table
## Environment setup (inside the etcd container)
# export ETCDCTL_API=3
# alias etcdctl='etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key'
## List etcd cluster members
# etcdctl member list
# Fixed: the sample output below was not commented out and would have
# been executed as shell commands.
# a9b6a1341829d62a, started, k8s-master03, https://172.20.5.13:2380, https://172.20.5.13:2379, false
# d1c737a26ea4dd70, started, k8s-master01, https://172.20.5.11:2380, https://172.20.5.11:2379, false
# fe2d4a2a33304913, started, k8s-master02, https://172.20.5.12:2380, https://172.20.5.12:2379, false
## Remove cluster member k8s-master02
# etcdctl member remove fe2d4a2a33304913
# Member fe2d4a2a33304913 removed from cluster 36067d1f1ca3f1db
## Exit the container
# exit
etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key endpoint health
其他选项
# Reap zombie processes daily at 03:00 by HUPing their parents.
# NOTE(review): this is a crontab entry, not a shell command — install
# it with `crontab -e` rather than running it directly.
0 3 * * * ps -A -ostat,ppid | grep -e '^[Zz]' | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1
标签: kubectl, kubernetes
From: https://www.cnblogs.com/xwjh/p/17141868.html