Namespace
名称空间,为资源对象的名称提供了限定条件或作用范围,它为使用同一集群的多个团队或项目提供了逻辑上的隔离机制,降低或消除了资源对象名称冲突的可能性。
namespace命名空间,后面简称ns。在K8s上面,大部分资源都受ns的限制,用来做资源的隔离;少部分资源如pv、clusterRole等不受ns控制
#查看有哪些ns
[root@k8s-master1 ~]# kubectl get ns
NAME STATUS AGE
default Active 8d
#默认,创建的资源如果不指定ns那么就存放到这里
kube-node-lease Active 8d
#系统级别的ns
kube-public Active 8d
#系统级别的ns
kube-system Active 8d
#不要存放业务服务,这里面是跑系统的
kubernetes-dashboard Active 8d
#dashboard,ui界面
#查看某个ns里面的pod
[root@k8s-master1 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-6bd6b69df9-nxn2j 1/1 Running 13 (23h ago) 8d
#插件calico,用来给pod分配ip
calico-node-2mbx5 1/1 Running 8 (23h ago) 8d
calico-node-gplc5 1/1 Running 8 (23h ago) 8d
calico-node-hvmm6 1/1 Running 8 (23h ago) 8d
calico-node-kmtth 1/1 Running 8 (23h ago) 8d
calico-node-ktnts 1/1 Running 8 (23h ago) 8d
calico-node-s9shv 1/1 Running 8 (23h ago) 8d
calico-typha-77fc8866f5-9wg6q 1/1 Running 8 (23h ago) 8d
coredns-567c556887-422mn 1/1 Running 8 (23h ago) 8d
#做k8s内部dns解析用的
coredns-567c556887-xgxtz 1/1 Running 8 (23h ago) 8d
etcd-k8s-master1 1/1 Running 8 (23h ago) 8d
etcd-k8s-master2.guoguo.com 1/1 Running 8 (23h ago) 8d
etcd-k8s-master3.guoguo.com 1/1 Running 8 (23h ago) 8d
kube-apiserver-k8s-master1 1/1 Running 19 (6h2m ago) 8d
kube-apiserver-k8s-master2.guoguo.com 1/1 Running 11 (23h ago) 8d
kube-apiserver-k8s-master3.guoguo.com 1/1 Running 11 (23h ago) 8d
kube-controller-manager-k8s-master1 1/1 Running 9 (23h ago) 8d
kube-controller-manager-k8s-master2.guoguo.com 1/1 Running 9 (6h4m ago) 8d
kube-controller-manager-k8s-master3.guoguo.com 1/1 Running 9 (23h ago) 8d
kube-proxy-cmfbh 1/1 Running 8 (23h ago) 8d
kube-proxy-ds4hd 1/1 Running 8 (23h ago) 8d
kube-proxy-h2zgf 1/1 Running 8 (23h ago) 8d
kube-proxy-hrn7q 1/1 Running 8 (23h ago) 8d
kube-proxy-wlt5r 1/1 Running 8 (23h ago) 8d
kube-proxy-z46nk 1/1 Running 8 (23h ago) 8d
kube-scheduler-k8s-master1 1/1 Running 9 (23h ago) 8d
kube-scheduler-k8s-master2.guoguo.com 1/1 Running 8 (23h ago) 8d
kube-scheduler-k8s-master3.guoguo.com 1/1 Running 10 (6h3m ago) 8d
metrics-server-684999f4d6-c584b 1/1 Running 8 (23h ago) 8d
#提供所有pod的资源使用,这个用kubectl top pods -n kube-system 可以看到占用了多少资源
#这个服务可以结合监控普罗米修斯来获取值
[root@k8s-master1 ~]# kubectl top pods -n kube-system
NAME CPU(cores) MEMORY(bytes)
calico-kube-controllers-6bd6b69df9-nxn2j 3m 42Mi
#"m"是cpu的毫核(millicore),3m相当于1核cpu除以1000再乘以3
#1核心等于1000毫核(1000m)
#3m也就是0.003核心
#300m就是0.3核心,3000m就是3核心
calico-node-2mbx5 27m 136Mi
#136MB
calico-node-gplc5 25m 125Mi
calico-node-hvmm6 31m 119Mi
calico-node-kmtth 30m 129Mi
calico-node-ktnts 25m 122Mi
calico-node-s9shv 26m 130Mi
calico-typha-77fc8866f5-9wg6q 4m 31Mi
coredns-567c556887-422mn 1m 22Mi
coredns-567c556887-xgxtz 1m 24Mi
etcd-k8s-master1 48m 118Mi
etcd-k8s-master2.guoguo.com 59m 123Mi
etcd-k8s-master3.guoguo.com 56m 117Mi
kube-apiserver-k8s-master1 34m 237Mi
kube-apiserver-k8s-master2.guoguo.com 48m 356Mi
kube-apiserver-k8s-master3.guoguo.com 42m 358Mi
kube-controller-manager-k8s-master1 2m 60Mi
kube-controller-manager-k8s-master2.guoguo.com 2m 25Mi
kube-controller-manager-k8s-master3.guoguo.com 20m 103Mi
kube-proxy-cmfbh 7m 28Mi
kube-proxy-ds4hd 10m 42Mi
kube-proxy-h2zgf 7m 26Mi
kube-proxy-hrn7q 12m 42Mi
kube-proxy-wlt5r 12m 29Mi
kube-proxy-z46nk 1m 42Mi
kube-scheduler-k8s-master1 3m 47Mi
kube-scheduler-k8s-master2.guoguo.com 4m 46Mi
kube-scheduler-k8s-master3.guoguo.com 3m 20Mi
metrics-server-684999f4d6-c584b 4m 33Mi
创建ns与删除ns
[root@k8s-master1 ~]# kubectl create ns test
#创建一个名为test的namespace
namespace/test created
[root@k8s-master1 ~]# kubectl get ns
NAME STATUS AGE
default Active 8d
kube-node-lease Active 8d
kube-public Active 8d
kube-system Active 8d
kubernetes-dashboard Active 8d
test Active 5s
[root@k8s-master1 ~]# kubectl delete ns test
#删除namespace,慎用慎用慎用!!!!!!!!!!!!!尽量不用,用前一定要看下里面有没有资源!!!!!!!
namespace "test" deleted
#查看ns里面的所有资源
[root@k8s-master1 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/calico-kube-controllers-6bd6b69df9-nxn2j 1/1 Running 13 (23h ago) 8d
pod/calico-node-2mbx5 1/1 Running 8 (23h ago) 8d
pod/calico-node-gplc5 1/1 Running 8 (23h ago) 8d
pod/calico-node-hvmm6 1/1 Running 8 (23h ago) 8d
pod/calico-node-kmtth 1/1 Running 8 (23h ago) 8d
pod/calico-node-ktnts 1/1 Running 8 (23h ago) 8d
pod/calico-node-s9shv 1/1 Running 8 (23h ago) 8d
pod/calico-typha-77fc8866f5-9wg6q 1/1 Running 8 (23h ago) 8d
pod/coredns-567c556887-422mn 1/1 Running 8 (23h ago) 8d
pod/coredns-567c556887-xgxtz 1/1 Running 8 (23h ago) 8d
pod/etcd-k8s-master1 1/1 Running 8 (23h ago) 8d
pod/etcd-k8s-master2.guoguo.com 1/1 Running 8 (23h ago) 8d
pod/etcd-k8s-master3.guoguo.com 1/1 Running 8 (23h ago) 8d
pod/kube-apiserver-k8s-master1 1/1 Running 19 (6h15m ago) 8d
pod/kube-apiserver-k8s-master2.guoguo.com 1/1 Running 11 (23h ago) 8d
pod/kube-apiserver-k8s-master3.guoguo.com 1/1 Running 11 (23h ago) 8d
pod/kube-controller-manager-k8s-master1 1/1 Running 9 (23h ago) 8d
pod/kube-controller-manager-k8s-master2.guoguo.com 1/1 Running 9 (6h17m ago) 8d
pod/kube-controller-manager-k8s-master3.guoguo.com 1/1 Running 9 (23h ago) 8d
pod/kube-proxy-cmfbh 1/1 Running 8 (23h ago) 8d
pod/kube-proxy-ds4hd 1/1 Running 8 (23h ago) 8d
pod/kube-proxy-h2zgf 1/1 Running 8 (23h ago) 8d
pod/kube-proxy-hrn7q 1/1 Running 8 (23h ago) 8d
pod/kube-proxy-wlt5r 1/1 Running 8 (23h ago) 8d
pod/kube-proxy-z46nk 1/1 Running 8 (23h ago) 8d
pod/kube-scheduler-k8s-master1 1/1 Running 9 (23h ago) 8d
pod/kube-scheduler-k8s-master2.guoguo.com 1/1 Running 8 (23h ago) 8d
pod/kube-scheduler-k8s-master3.guoguo.com 1/1 Running 10 (6h16m ago) 8d
pod/metrics-server-684999f4d6-c584b 1/1 Running 8 (23h ago) 8d
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/calico-typha ClusterIP 10.105.49.165 <none> 5473/TCP 8d
service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 8d
service/metrics-server ClusterIP 10.103.4.127 <none> 443/TCP 8d
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/calico-node 6 6 6 6 6 kubernetes.io/os=linux 8d
daemonset.apps/kube-proxy 6 6 6 6 6 kubernetes.io/os=linux 8d
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/calico-kube-controllers 1/1 1 1 8d
deployment.apps/calico-typha 1/1 1 1 8d
deployment.apps/coredns 2/2 2 2 8d
deployment.apps/metrics-server 1/1 1 1 8d
NAME DESIRED CURRENT READY AGE
replicaset.apps/calico-kube-controllers-6bd6b69df9 1 1 1 8d
replicaset.apps/calico-typha-77fc8866f5 1 1 1 8d
replicaset.apps/coredns-567c556887 2 2 2 8d
replicaset.apps/metrics-server-684999f4d6 1 1 1 8d
k8s删除namespace状态一直为terminating问题处理
出现原因:可能是当前集群比较繁忙,删除流程中某一个环节卡住了;虽然名称空间里面的资源已经被清理,但这个名称空间本身会一直卡在Terminating状态无法删除
1、新开一个窗口运行命令 kubectl proxy
> 此命令启动了一个代理服务来接收来自你本机的HTTP连接并转发至API服务器,同时处理身份认证
2、新开一个终端窗口,将下面shell脚本整理到文本内`1.sh`并执行,$1参数即为删除不了的ns名称
#------------------------------------------------------------------------------------
#!/bin/bash
# Force-delete a namespace stuck in Terminating state.
# Usage: bash 1.sh <namespace>
# How it works: strips the "kubernetes" finalizer from the namespace spec and
# PUTs the result to the namespace's /finalize subresource via a local
# kubectl proxy, which lets the API server complete the deletion.
set -euo pipefail

# Print an error to stderr and exit.
die() { echo "$*" 1>&2; exit 1; }

# Verify a required binary is on PATH ('command -v' is the portable check).
need() {
  command -v "$1" &>/dev/null || die "Binary '$1' is missing but required"
}

# checking pre-reqs
need "jq"
need "curl"
need "kubectl"

# ${1:-} keeps 'set -u' happy when no argument is given, so the friendly
# usage message below is reached instead of an "unbound variable" abort.
# (The original 'shift' also aborted under 'set -e' when $# was 0.)
PROJECT="${1:-}"
test -n "$PROJECT" || die "Missing arguments: kill-ns <namespace>"

# Start a local API proxy; clean it up on any exit path.
kubectl proxy &>/dev/null &
PROXY_PID=$!
killproxy() {
  # '|| true' avoids the spurious "kill: No such process" error when the
  # proxy has already exited by the time the trap runs.
  kill "$PROXY_PID" 2>/dev/null || true
}
trap killproxy EXIT
sleep 1 # give the proxy a second

# Delete only the "kubernetes" finalizer. NB: the common copy-pasted form
# select("kubernetes") is always truthy and wipes ALL finalizers;
# select(. == "kubernetes") matches just the intended entry.
kubectl get namespace "$PROJECT" -o json \
  | jq 'del(.spec.finalizers[] | select(. == "kubernetes"))' \
  | curl -s -k -H "Content-Type: application/json" -X PUT -o /dev/null \
      --data-binary @- "http://localhost:8001/api/v1/namespaces/${PROJECT}/finalize" \
  && echo "Killed namespace: $PROJECT"
#------------------------------------------------------------------------------------
3. 执行脚本删除
# bash 1.sh kubevirt
Killed namespace: kubevirt
1.sh: line 23: kill: (9098) - No such process