
Kubernetes 1.28.0 + apisix-ingress installation walkthrough [Part 1]

Posted: 2024-04-25 16:57:14

Environment overview


Host information


Note: due to limited resources, the installation uses three VMware virtual machines.

Hostname IP OS Specs
k8s-master 192.168.199.101 CentOS 7.9 2 CPUs, 4 GB RAM, 100 GB disk
k8s-node01 192.168.199.102 CentOS 7.9 2 CPUs, 4 GB RAM, 100 GB disk
k8s-node02 192.168.199.103 CentOS 7.9 2 CPUs, 4 GB RAM, 100 GB disk

Software versions


Software Version
containerd v1.7.14
k8s v1.28.0
flannel v0.25.1
apisix (Helm chart) 2.6.0

Environment initialization


Note: run the initialization steps on all hosts.

Configure the yum repositories

cd /etc/yum.repos.d/
mkdir bak ; mv *.repo bak/

curl https://mirrors.aliyun.com/repo/Centos-7.repo -o Centos-7.repo
curl https://mirrors.aliyun.com/repo/epel-7.repo -o epel-7.repo
sed -i '/aliyuncs/d' Centos-7.repo

# Add the Kubernetes repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Set the hostnames

hostnamectl set-hostname k8s-master

cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.199.101 k8s-master
192.168.199.102 k8s-node01
192.168.199.103 k8s-node02

# Copy to the two node hosts
root@k8s-master(192.168.199.101)~>for i in 2 3; do scp /etc/hosts 192.168.199.10$i:/etc/ ; done

Configure the NTP service

yum install chrony ntpdate -y
sed "s/^server/#server/g" /etc/chrony.conf
echo 'server tiger.sina.com.cn iburst' >> /etc/chrony.conf
echo 'server ntp1.aliyun.com iburst' >> /etc/chrony.conf
systemctl enable chronyd ; systemctl start chronyd
ntpdate tiger.sina.com.cn
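
Optionally, confirm that chrony is actually synchronizing against the servers just added:

chronyc sources -v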

Disable SELinux and firewalld

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
systemctl disable firewalld; systemctl stop firewalld

After these changes, rebooting the host is recommended.

reboot

Disable swap

swapoff -a
sed -i  '/swap/s/^/#/' /etc/fstab

Load kernel modules

cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter

Configure kernel parameters

cat << EOF > /etc/sysctl.d/99-kubernetes-cri.conf
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
user.max_user_namespaces=28633
EOF

sysctl --system
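
A quick spot-check that the new parameters took effect:

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward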

Enable IPVS support

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

yum install -y ipset ipvsadm

Deploy containerd


Note: install containerd on all hosts.

nerdctl download: https://github.com/containerd/nerdctl/releases/download/v1.7.5/nerdctl-full-1.7.5-linux-amd64.tar.gz

tar xf nerdctl-full-1.7.5-linux-amd64.tar.gz  -C /usr/local/

Generate the containerd configuration

mkdir -p /etc/containerd/
cd /etc/containerd/
containerd config default > config.toml

vim config.toml
...
SystemdCgroup = false # change this to true
...


Then modify the following in /etc/containerd/config.toml:
[plugins."io.containerd.grpc.v1.cri"]
  ...
  # sandbox_image = "k8s.gcr.io/pause:3.6"
  sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"	#这里一定要注意,要根据下载到本地 pause镜像的版本来进行修改,否则初始化会过不去。

Start the services

systemctl enable --now containerd buildkit

Check the version

ctr version
Client:
  Version:  v1.7.14
  Revision: dcf2847247e18caba8dce86522029642f60fe96b
  Go version: go1.21.8

Server:
  Version:  v1.7.14
  Revision: dcf2847247e18caba8dce86522029642f60fe96b
  UUID: 426750f8-14ca-4490-8cca-3ded2cc2a21c

k8s-master installation steps


Deploy Kubernetes with kubeadm


Note: run this section on the k8s-master node only.

Install the packages

yum install -y kubeadm-1.28.0 kubelet-1.28.0 kubectl-1.28.0

Generate the default configuration

kubeadm completion bash > /etc/bash_completion.d/kubeadm
kubectl completion bash > /etc/bash_completion.d/kubectl
source /etc/bash_completion.d/kubectl /etc/bash_completion.d/kubeadm

kubeadm config print init-defaults > kubeadm-init.yml

Edit the configuration

vim kubeadm-init.yml

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 1.2.3.4
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
scheduler: {}


-------------------- modified as follows --------------------
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 0s # set the token to never expire
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.199.101 # set to the k8s-master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master	# set to the hostname
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # switch to a mirror inside China
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # CIDR for the pod network
---
# declare systemd as the cgroup driver
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
failSwapOn: false
---
# enable IPVS
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

List and pull the images

# List the images
kubeadm config images list --config=kubeadm-init.yml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.28.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.28.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.28.0
registry.aliyuncs.com/google_containers/pause:3.9
registry.aliyuncs.com/google_containers/etcd:3.5.9-0
registry.aliyuncs.com/google_containers/coredns:v1.10.1

# Pull the images
kubeadm config images pull --config=kubeadm-init.yml
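
Optionally, confirm the images landed in containerd's k8s.io namespace, using the nerdctl installed earlier:

nerdctl -n k8s.io images | grep registry.aliyuncs.com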

Enable kubelet at boot

# without this, cluster initialization prints a warning
systemctl enable kubelet.service

Initialize the cluster

kubeadm init --config=kubeadm-init.yml | tee kubeadm-init.log
...

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.199.101:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:94805e71436365f20bca9e1e4a63509578bdc39c2428302c915b0c01fc111430

Set up cluster access

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Check the nodes

kubectl get nodes
NAME         STATUS     ROLES           AGE    VERSION
k8s-master   NotReady   control-plane   105s   v1.28.0

The node reports NotReady because no CNI network plugin is installed yet; installing flannel in the next section fixes this.

Install the flannel network plugin


Note: run this section on the k8s-master node only.

Download the manifest

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

Apply it

kubectl apply -f kube-flannel.yml

Check the namespaces

kubectl get ns
NAME              STATUS   AGE
default           Active   3m44s
kube-flannel      Active   23s
kube-node-lease   Active   3m44s
kube-public       Active   3m44s
kube-system       Active   3m44s

kubectl get po -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-465rx   1/1     Running   0          29s

kubectl get nodes
NAME         STATUS   ROLES           AGE     VERSION
k8s-master   Ready    control-plane   3m57s   v1.28.0

k8s-node operations


Install the packages

yum install -y kubeadm-1.28.0 kubelet-1.28.0 kubectl-1.28.0

Enable kubelet at boot

# without this, joining the cluster prints a warning
systemctl enable kubelet.service

Join the cluster

kubeadm join 192.168.199.101:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:94805e71436365f20bca9e1e4a63509578bdc39c2428302c915b0c01fc111430

Using the cluster


Check the cluster nodes

kubectl get nodes
NAME         STATUS   ROLES           AGE   VERSION
k8s-master   Ready    control-plane   23m   v1.28.0
k8s-node01   Ready    <none>          62s   v1.28.0

Create a pod

kubectl run ngx --image=nginx:alpine --port=80 --restart=Always

Check the pod

kubectl get po -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
ngx    1/1     Running   0          16s   10.244.1.2   k8s-node01   <none>           <none>

Create a service

kubectl expose pod ngx --port=80 --target-port=80 --name=ngx

Check the service

kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP   32m
ngx          ClusterIP   10.110.223.232   <none>        80/TCP    22s

Inside the cluster, the pod is reachable through its cluster IP:

curl -I  10.110.223.232
HTTP/1.1 200 OK
Server: nginx/1.25.4
Date: Tue, 16 Apr 2024 03:51:18 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Wed, 14 Feb 2024 16:20:36 GMT
Connection: keep-alive
ETag: "65cce854-267"
Accept-Ranges: bytes

ingress-controller


Note: run these steps on k8s-master.

We had always used Traefik as the ingress-controller; for this project it is replaced with APISIX.

APISIX official docs: https://apisix.apache.org/docs/ingress-controller/getting-started/

Traefik vs. APISIX: https://apisix.incubator.apache.org/zh/blog/2022/12/19/apisix-ingress-better-than-traefik/

After a long time spent combing through the official docs and material online without achieving the desired functionality, this installation and debugging process is recorded here in detail. The official recommendation is to install with Helm.

Install Helm

wget https://get.helm.sh/helm-v3.14.4-linux-amd64.tar.gz
tar xf helm-v3.14.4-linux-amd64.tar.gz
cp -a linux-amd64/helm  /usr/local/bin/
helm version
version.BuildInfo{Version:"v3.14.4", GitCommit:"81c902a123462fd4052bc5e9aa9c513c4c8fc142", GitTreeState:"clean", GoVersion:"go1.21.9"}

Download apisix

helm repo add apisix https://charts.apiseven.com
helm repo update
helm pull apisix/apisix
tar xf apisix-2.6.0.tgz
cd apisix
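
Note that helm pull fetches whatever chart version is current at that moment, so the downloaded file name may differ. To reproduce the exact version used here, pin it:

helm pull apisix/apisix --version 2.6.0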

Common apisix configuration


Anyone who has used Helm knows that a chart must be customized to your own needs, which makes this step especially important.

The official Helm install docs for apisix (https://apisix.apache.org/docs/helm-chart/apisix/) only give a generic example; here it has to be customized for our environment.

etcd cluster

First, apisix creates a three-node etcd cluster. For availability, note the following:

  1. The three etcd members must land on three different physical nodes.
  2. etcd data must be persisted, which requires a StorageClass.

That means configuring a StorageClass. My environment is one master and two nodes with no external storage, so the next-best scheme is used:

  • Create a fixed directory on every node, point a PV at that directory, and bind PV to PVC through the StorageClass.

Create the directory on every host:

# This directory holds the persisted etcd data
mkdir -p /data/k8s/etcd-data

Create the PVs

vim pv-local.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-local-1	# note the name
spec:
  capacity:
    storage: 20Gi	# capacity
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage	# must match the StorageClass created below
  local:
    path: /data/k8s/etcd-data	# local persistence directory
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-master	# node this PV is pinned to
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-local-2	# note the name
spec:
  capacity:
    storage: 20Gi	# capacity
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage	# must match the StorageClass created below
  local:
    path: /data/k8s/etcd-data	# local persistence directory
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-node01	# node this PV is pinned to
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-local-3	# note the name
spec:
  capacity:
    storage: 20Gi	# capacity
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  storageClassName: local-storage	# must match the StorageClass created below
  local:
    path: /data/k8s/etcd-data	# local persistence directory
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - k8s-node02	# node this PV is pinned to

Apply the manifest:

kubectl apply -f pv-local.yaml

Create the StorageClass

vim storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

Apply the manifest:

kubectl apply -f storageclass.yaml

Check

kubectl get pv,sc
NAME                          CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
persistentvolume/pv-local-1   20Gi       RWO            Delete           Available           local-storage            3m9s
persistentvolume/pv-local-2   20Gi       RWO            Delete           Available           local-storage            3m8s
persistentvolume/pv-local-3   20Gi       RWO            Delete           Available           local-storage            3m8s

NAME                                        PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
storageclass.storage.k8s.io/local-storage   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  21s
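
The PVs stay Available for now: with volumeBindingMode: WaitForFirstConsumer they only bind once pods that claim them are scheduled. After the Helm install later in this article, the binding can be confirmed with:

kubectl get pvc -n ingress-apisix
kubectl get pv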

Next, configure the etcd Helm chart:

cd apisix/charts/etcd/
vim values.yaml
 18   storageClass: "local-storage"	# line 18: set storageClass to local-storage

One more thing to consider: this is a three-physical-node cluster, but the master does not take part in pod scheduling, so a three-member etcd cluster cannot be formed as-is. Pods must be allowed to schedule onto the master node, configured as follows:

# tolerate every taint key, i.e. allow scheduling onto the master node
vim values.yaml
452 tolerations:
453 - operator: "Exists"
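
Requirement 1 above (one etcd member per physical node) can also be made explicit. The bundled Bitnami etcd chart exposes an anti-affinity preset; setting it to hard turns the spreading into a hard scheduling rule. The key name below is an assumption about the chart version shipped here, so confirm it exists in your values.yaml before relying on it:

vim values.yaml
podAntiAffinityPreset: hard	# assumed Bitnami chart key; "hard" = required pod anti-affinity between etcd replicas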

Run apisix as a DaemonSet


By default the apisix pods use a Deployment controller; change this to a DaemonSet so the ingress controller is reachable on every physical node.

cd apisix/
vim values.yaml
# set to true to enable the DaemonSet controller
useDaemonSet: true  
...
# tolerate all taints so pods can schedule onto the master node
tolerations:
- operator: "Exists"	
...
# enable the dashboard
dashboard:
  enabled: true
...
# configure Kubernetes-based service discovery
...
    envs:
      - KUBERNETES_SERVICE_HOST: "kubernetes.default.svc.cluster.local"
      - KUBERNETES_SERVICE_PORT: "443"
...
rbac:
  create: true
...
  discovery:
    enabled: true
    registry:
      kubernetes:
        service:
          schema: https
          host: ${KUBERNETES_SERVICE_HOST}
          port: ${KUBERNETES_SERVICE_PORT}
          # (is this token needed?)
        client:
          token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        namespace_selector:
          equal: default
        shared_size: 1m
        watch_endpoint_slices: false
        
# configure the ingress-controller
ingress-controller:
  enabled: true
  config:
    kubernetes:
      enableGatewayAPI: true
    apisix:
      adminAPIVersion: "v3"
      serviceNamespace: ingress-apisix
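
Before installing, it can help to render the chart locally and confirm the edits take effect, for example that apisix now renders as a DaemonSet (run from the chart directory; if useDaemonSet took effect, this should print a match):

helm template apisix . --namespace ingress-apisix -f values.yaml | grep "kind: DaemonSet"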

Run the Helm install

helm install apisix . --namespace ingress-apisix --create-namespace -f values.yaml
NAME: apisix
LAST DEPLOYED: Wed Apr 24 11:21:11 2024
NAMESPACE: ingress-apisix
STATUS: deployed
REVISION: 1
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace ingress-apisix -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
  export NODE_IP=$(kubectl get nodes --namespace ingress-apisix -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT

Check the pods and services

kubectl get po -n ingress-apisix -o wide
NAME                                        READY   STATUS    RESTARTS        AGE     IP           NODE         NOMINATED NODE   READINESS GATES
apisix-dashboard-9f6696d8f-z5f9x            1/1     Running   4 (3m36s ago)   4m47s   10.244.1.4   k8s-node02   <none>           <none>
apisix-wbx79                                1/1     Running   0               20s     10.244.0.8   k8s-master   <none>           <none>
apisix-7nt8t                                1/1     Running   0               4m47s   10.244.2.3   k8s-node01   <none>           <none>
apisix-jgqfn                                1/1     Running   0               72s     10.244.1.8   k8s-node02   <none>           <none>
apisix-etcd-1                               1/1     Running   0               39s     10.244.0.7   k8s-master   <none>           <none>
apisix-etcd-0                               1/1     Running   0               4m47s   10.244.2.4   k8s-node01   <none>           <none>
apisix-etcd-2                               1/1     Running   0               101s    10.244.1.7   k8s-node02   <none>           <none>
apisix-ingress-controller-7dd4cd4f5-9pbn6   1/1     Running   0               102s    10.244.2.5   k8s-node01   <none>           <none>

Tidying the output by hand, you can see that both the etcd and apisix pods meet the goal: three nodes, one pod per node.

Services

kubectl get svc -n ingress-apisix
NAME                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
apisix-admin                               ClusterIP   10.104.110.134   <none>        9180/TCP                     8m30s
apisix-dashboard                           ClusterIP   10.104.148.32    <none>        80/TCP                       8m30s
apisix-etcd                                ClusterIP   10.103.56.180    <none>        2379/TCP,2380/TCP            8m30s
apisix-etcd-headless                       ClusterIP   None             <none>        2379/TCP,2380/TCP            8m30s
apisix-gateway                             NodePort    10.110.254.20    <none>        80:30952/TCP                 8m30s
apisix-ingress-controller                  ClusterIP   10.101.74.8      <none>        80/TCP                       5m26s
apisix-ingress-controller-apisix-gateway   NodePort    10.106.101.32    <none>        80:32029/TCP,443:30677/TCP   5m26s

Make the apisix gateway listen on port 80


Without a load balancer, you generally want the gateway listening on port 80 or 443, which requires the following changes.

It is best not to modify the controller objects directly; edit the chart instead and run an upgrade.

vim apisix/templates/deployment.yaml
...

          ports:
            - name: http
              containerPort: {{ .Values.service.http.containerPort }}
              hostPort: {{ .Values.service.http.hostPort }} # map the port directly via the pod's hostPort
              protocol: TCP
            {{- range .Values.service.http.additionalContainerPorts }}
            - name: http-{{ .port | toString }}
              containerPort: {{ .port }}
              protocol: TCP
            {{- end }}
            - name: tls
              containerPort: {{ .Values.apisix.ssl.containerPort }}
              hostPort: {{ .Values.apisix.ssl.hostPort }} # map the port directly via the pod's hostPort
              protocol: TCP

...

Then define the values in values.yaml:
vim apisix/values.yaml
  http:
    enabled: true
    servicePort: 80
    hostPort: 80
    containerPort: 9080
    # -- Support multiple http ports, See [Configuration](https://github.com/apache/apisix/blob/0bc65ea9acd726f79f80ae0abd8f50b7eb172e3d/conf/config-default.yaml#L24)
    additionalContainerPorts: []
      # - port: 9081
      #   enable_http2: true          # If not set, the default value is `false`.
      # - ip: 127.0.0.2               # Specific IP, If not set, the default value is `0.0.0.0`.
      #   port: 9082
      #   enable_http2: true
  # -- Apache APISIX service settings for tls
  tls:
    servicePort: 443
    hostPort: 443

After the changes above, upgrade the chart:

cd apisix/
helm upgrade  apisix . --namespace ingress-apisix --create-namespace -f values.yaml
Release "apisix" has been upgraded. Happy Helming!
NAME: apisix
LAST DEPLOYED: Thu Apr 25 16:15:51 2024
NAMESPACE: ingress-apisix
STATUS: deployed
REVISION: 2
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace ingress-apisix -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
  export NODE_IP=$(kubectl get nodes --namespace ingress-apisix -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT

Visit port 80 in a browser

[screenshot: response from the gateway on port 80]

At this point, the gateway is listening on port 80.
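
A curl against any node IP confirms this without a browser. With no routes configured yet, APISIX itself should answer, typically with its "404 Route Not Found" body, which proves the hostPort mapping works:

curl -i http://192.168.199.101/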


Make the apisix dashboard listen on port 9000


Change the dashboard to listen on port 9000, using the same approach as above.

vim apisix/charts/apisix-dashboard/templates/deployment.yaml
...
          ports:
            - name: http
              containerPort: {{ .Values.config.conf.listen.port }}
              hostPort: {{ .Values.config.conf.listen.hostPort }}
...

Edit values.yaml:
vim apisix/charts/apisix-dashboard/values.yaml
...
config:
  conf:
    listen:
      # -- The address on which the Manager API should listen.
      # The default value is 0.0.0.0, if want to specify, please enable it.
      # This value accepts IPv4, IPv6, and hostname.
      host: 0.0.0.0
      # -- The port on which the Manager API should listen.
      port: 9000
      hostPort: 9000
...

After the changes above, upgrade the chart:

cd apisix/
helm upgrade  apisix . --namespace ingress-apisix --create-namespace -f values.yaml
Release "apisix" has been upgraded. Happy Helming!
NAME: apisix
LAST DEPLOYED: Thu Apr 25 16:25:27 2024
NAMESPACE: ingress-apisix
STATUS: deployed
REVISION: 3
NOTES:
1. Get the application URL by running these commands:
  export NODE_PORT=$(kubectl get --namespace ingress-apisix -o jsonpath="{.spec.ports[0].nodePort}" services apisix-gateway)
  export NODE_IP=$(kubectl get nodes --namespace ingress-apisix -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT

To reach port 9000 in a browser, first determine which physical node the dashboard pod is running on:

kubectl get po -n ingress-apisix  -o wide | egrep dashboard
apisix-dashboard-fd4d9fdc8-wrdnv            1/1     Running   0          69s     10.244.2.7   k8s-node02   <none>           <none>

It runs on k8s-node02 (192.168.199.103); open that address in a browser.

[screenshot: apisix dashboard login page]

Default username: admin, password: admin.

From here, routing rules can be configured directly through the dashboard.


Configure routing rules via the dashboard


Create test pods

kubectl create deployment ngx --image nginx:alpine --replicas 2 --port 80
kubectl expose deployment ngx --port 80 --target-port 80 --name ngx
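
As an alternative to clicking through the dashboard (shown next), the ingress controller installed above also watches ApisixRoute custom resources. A minimal declarative sketch for the ngx Service just created; the host ngx.example.com is a placeholder, substitute your own:

vim ngx-route.yaml
apiVersion: apisix.apache.org/v2
kind: ApisixRoute
metadata:
  name: ngx-route
spec:
  http:
  - name: ngx-rule
    match:
      hosts:
      - ngx.example.com	# placeholder host, adjust to your environment
      paths:
      - /*
    backends:
    - serviceName: ngx
      servicePort: 80

kubectl apply -f ngx-route.yaml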

Configure the rule in the dashboard

[screenshots: route creation steps in the dashboard]

Click Next.

[screenshot: the next configuration step]

After that it is just Next, Next, Submit.

[screenshot: successful request through the gateway]

The request succeeds. You can edit the page inside each pod yourself and refresh to check that requests round-robin between them.
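
A quick way to verify the round-robin behavior, as a sketch: it assumes the route host configured in the dashboard was ngx.example.com (a placeholder) and relies on the app=ngx label that kubectl create deployment sets automatically:

# write each pod's hostname into its index page
for p in $(kubectl get po -l app=ngx -o name); do
  kubectl exec $p -- sh -c 'hostname > /usr/share/nginx/html/index.html'
done

# repeated requests through the gateway should alternate between the two pod names
for i in $(seq 4); do curl -s -H "Host: ngx.example.com" http://192.168.199.101/; done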



--- EOF ---

From: https://www.cnblogs.com/hukey/p/18158054
