First of all, thanks to this blog post: https://my.oschina.net/u/4197945/blog/15510668 (author: 运维有术星主).
Also see the official KubeSphere documentation: https://www.kubesphere.io/zh/docs/v3.4/devops-user-guide/how-to-use/pipelines/create-a-pipeline-using-graphical-editing-panel/
This document records the points to watch out for during the configuration process.
1. Add nodes to the cluster. When preparing the config file, it is best to reuse the configuration file from the initial installation.
Note: the official documentation for adding new nodes is here: https://www.kubesphere.io/zh/docs/v3.4/installing-on-linux/cluster-operation/add-new-nodes/
The config file generated by that method is missing some parameters, for example whether the Kubernetes container runtime is docker or containerd, and by default the docker runtime will be installed.
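For reference, the official approach generates a config file from the running cluster roughly as follows (a sketch; the exact flags depend on your kk version), and the runtime-related fields should be hand-checked before using it:

```bash
# Generate a config file from the live cluster (the official add-new-nodes approach)
./kk create config --from-cluster

# Before running "kk add nodes" with the generated file, verify at least:
#   kubernetes.containerManager: containerd    (otherwise the new node may default to docker)
#   registry.privateRegistry / registryMirrors (so images are pulled from the right place)
```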
So I recommend using the configuration file from the initial installation instead, for example:
```yaml
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: ksmaster01, address: 192.168.30.204, internalAddress: 192.168.30.204, user: root, password: "YnPJ+Wa9f9X8HMQ6qVc7"}
  - {name: ksmaster02, address: 192.168.30.205, internalAddress: 192.168.30.205, user: root, password: "YnPJ+Wa9f9X8HMQ6qVc8"}
  - {name: ksmaster03, address: 192.168.30.206, internalAddress: 192.168.30.206, user: root, password: "YnPJ+Wa9f9X8HMQ6qVc9"}
  - {name: ksworker04, address: 192.168.30.212, internalAddress: 192.168.30.212, user: root, password: "YnPJ+Wa9f9X8HMQ6qVc13"}
  roleGroups:
    etcd:
    - ksmaster01
    - ksmaster02
    - ksmaster03
    control-plane:
    - ksmaster01
    - ksmaster02
    - ksmaster03
    worker:
    - ksworker04
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: "192.168.30.203"
    port: 6443
  kubernetes:
    version: v1.23.17
    clusterName: cluster.local
    autoRenewCerts: true
    containerManager: containerd
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    auths:
      "harbor.emergen.cn":
        username: admin
        password: nt5Hw7T+FpAkQ4za/vBb
        skipTLSVerify: false
        plainHTTP: false
    privateRegistry: "harbor.emergen.cn/kubesphereio"
    namespaceOverride: ""
    registryMirrors: ["hub.deeprobe.online"]
    insecureRegistries: []
  addons: []

---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.4.1
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  local_registry: ""
  # dev_tag: ""
  etcd:
    monitoring: false
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30880
        type: NodePort
    # apiserver:
    #   resources: {}
    # controllerManager:
    #   resources: {}
    redis:
      enabled: false
      enableHA: false
      volumeSize: 2Gi
    openldap:
      enabled: false
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      enabled: false
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
    opensearch:
      # master:
      #   volumeSize: 4Gi
      #   replicas: 1
      #   resources: {}
      # data:
      #   volumeSize: 20Gi
      #   replicas: 1
      #   resources: {}
      enabled: true
      logMaxAge: 7
      opensearchPrefix: whizard
      basicAuth:
        enabled: true
        username: "admin"
        password: "admin"
      externalOpensearchHost: ""
      externalOpensearchPort: ""
      dashboard:
        enabled: true
  alerting:
    enabled: true
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:
    enabled: true
    # operator:
    #   resources: {}
    # webhook:
    #   resources: {}
  devops:
    enabled: false
    jenkinsCpuReq: 0.5
    jenkinsCpuLim: 1
    jenkinsMemoryReq: 4Gi
    jenkinsMemoryLim: 4Gi
    jenkinsVolumeSize: 16Gi
  events:
    enabled: true
    # operator:
    #   resources: {}
    # exporter:
    #   resources: {}
    ruler:
      enabled: true
      replicas: 2
      # resources: {}
  logging:
    enabled: true
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: true
  monitoring:
    storageClass: ""
    node_exporter:
      port: 9100
      # resources: {}
    # kube_rbac_proxy:
    #   resources: {}
    # kube_state_metrics:
    #   resources: {}
    prometheus:
      # replicas: 1
      volumeSize: 40Gi
      # resources: {}
      # operator:
      #   resources: {}
    # alertmanager:
    #   replicas: 1
    #   resources: {}
    # notification_manager:
    #   resources: {}
    #   operator:
    #     resources: {}
    #   proxy:
    #     resources: {}
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: calico
    topology:
      type: weave-scope
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: false
    istio:
      components:
        ingressGateways:
        - name: istio-ingressgateway
          enabled: false
        cni:
          enabled: false
  edgeruntime:
    enabled: true
    kubeedge:
      enabled: true
      cloudCore:
        cloudHub:
          advertiseAddress:
          - 192.168.30.203
        service:
          cloudhubNodePort: "30000"
          cloudhubQuicNodePort: "30001"
          cloudhubHttpsNodePort: "30002"
          cloudstreamNodePort: "30003"
          tunnelNodePort: "30004"
        # resources: {}
        # hostNetWork: false
      iptables-manager:
        enabled: true
        mode: "external"
        # resources: {}
      # edgeService:
      #   resources: {}
  gatekeeper:
    enabled: false
    # controller_manager:
    #   resources: {}
    # audit:
    #   resources: {}
  terminal:
    timeout: 600
```
```bash
export KKZONE=cn
./kk add nodes -f config-nodes-add.yaml   # before running this, finish step 2 first so the required components are already present on the new node
```
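Once the node has joined, a quick check (my own addition, not in the original notes) that it really uses containerd is to look at the CONTAINER-RUNTIME column:

```bash
# The new worker should show containerd://... under CONTAINER-RUNTIME
kubectl get nodes -o wide
```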
2. Configure the new node
2.1 yum repositories
```bash
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo-history
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y wget
yum clean all
yum makecache
```
2.2 Install the dependencies. According to the official documentation, these components are required:
```bash
yum -y install socat ipset conntrack ebtables ipvsadm
```
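A quick sanity check (my own addition) that all of these actually landed on the node:

```bash
# Report any of the required tools that are still missing
for cmd in socat ipset conntrack ebtables ipvsadm; do
  command -v "$cmd" >/dev/null 2>&1 || echo "missing: $cmd"
done
```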
3. Configure the pipeline: the .NET Core environment
The project is developed in .NET Core.
3.1 Open the ConfigMap (配置字典) jenkins-casc-config and locate jenkins_user.yaml.
Add the dotnetcore container environment shown below. After saving, wait about one minute for the new agent environment to appear in the pipeline editor.
- name: "dotnetcore" namespace: "kubesphere-devops-worker" label: "dotnetcore" nodeUsageMode: "EXCLUSIVE" idleMinutes: 0 containers: - name: "dotnetcore" image: "harbor.emergen.cn/library/dotnet-emergen:3-5-6" # 指定自定义dotnet镜像 command: "cat" args: "" ttyEnabled: true privileged: true resourceRequestCpu: "100m" resourceLimitCpu: "4000m" resourceRequestMemory: "100Mi" resourceLimitMemory: "8192Mi" alwaysPullImage: true - name: "jnlp" #image: "jenkins/jnlp-slave:3.27-1" image: "harbor.emergen.cn/kubesphereio/jenkins/inbound-agent:4.10-2" #command: "jenkins-slave" args: "^${computer.jnlpmac} ^${computer.name}" resourceRequestCpu: "50m" resourceRequestMemory: "400Mi" resourceLimitMemory: "1536Mi" imagePullSecrets: # 指定私有仓库凭证 - name: harborsecret workspaceVolume: emptyDirWorkspaceVolume: memory: false volumes: - hostPathVolume: hostPath: "/var/run/docker.sock" mountPath: "/var/run/docker.sock" - hostPathVolume: # 将nuget包缓存持久化到hostPath hostPath: "/var/data/jenkins_nuget_cache" mountPath: "/root/.nuget" yaml: | spec: affinity: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 1 preference: matchExpressions: - key: node-role.kubernetes.io/worker operator: In values: - ci tolerations: - key: "node.kubernetes.io/ci" operator: "Exists" effect: "NoSchedule" - key: "node.kubernetes.io/ci" operator: "Exists" effect: "PreferNoSchedule" containers: - name: "dotnetcore" resources: requests: ephemeral-storage: "1Gi" limits: ephemeral-storage: "10Gi" volumeMounts: - name: config-volume mountPath: /root/.nuget/NuGet/NuGet.Config subPath: NuGet.Config volumes: - name: config-volume configMap: name: ks-devops-agent items: - key: NugetSetting path: NuGet.Config securityContext: fsGroup: 1000 - name: "dotnetcore2-emergen" # 自定义 Jenkins Agent 的名称。 label: "dotnetcore2-emergen" # 自定义 Jenkins Agent 的标签。若要指定多个标签,请用空格来分隔标签。 inheritFrom: "dotnetcore" # 该自定义 Jenkins Agent 所继承的现有容器组模板的名称。 containers: - name: "dotnetcore" # 该自定义 Jenkins Agent 所继承的现有容器组模板中指定的容器名称。 image: "harbor.emergen.cn/library/dotnet-emergen:2.2" # 可以使用自己的镜像。 imagePullSecrets: - name: harborsecret - name: "dotnetcore6-emergen" # 自定义 Jenkins Agent 的名称。 label: "dotnetcore6-emergen" # 自定义 Jenkins Agent 的标签。若要指定多个标签,请用空格来分隔标签。 inheritFrom: "dotnetcore" # 该自定义 Jenkins Agent 所继承的现有容器组模板的名称。 containers: - name: "dotnetcore" # 该自定义 Jenkins Agent 所继承的现有容器组模板中指定的容器名称。 image: "harbor.emergen.cn/library/dotnet-emergen:v6" # 可以使用自己的镜像。 imagePullSecrets: - name: harborsecretdotnetcore
4. Configure the pipeline (Jenkinsfile)
Note: every credentialsId referenced below is configured under the DevOps project's Credentials; the remaining variables are pipeline run parameters.
```groovy
pipeline {
  agent {
    node {
      label 'dotnetcore'
    }
  }
  stages {
    stage('Checkout') {
      steps {
        git(url: "${GIT_REPOSITORY_URL}", credentialsId: 'gitlab-auth', branch: "${BRANCH}", changelog: true, poll: false)
      }
    }
    stage('Build and push image') {
      steps {
        container("dotnetcore") {
          sh 'dotnet restore "./demo/demo.csproj"'
          sh 'dotnet build ./demo/demo.csproj -c Release -o dist --force'
          sh 'podman build -t $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:$BUILD_NUMBER .'
          withCredentials([usernamePassword(credentialsId: 'harbor-auth', passwordVariable: 'HARBOR_PASSWORD', usernameVariable: 'HARBOR_USERNAME')]) {
            sh '''echo "$HARBOR_PASSWORD" | podman login $REGISTRY -u "$HARBOR_USERNAME" --password-stdin'''
            sh '''podman push $REGISTRY/$HARBOR_NAMESPACE/$APP_NAME:$BUILD_NUMBER'''
          }
        }
      }
    }
    stage('Deploy') {
      steps {
        container('dotnetcore') {
          withCredentials([kubeconfigFile(credentialsId: 'kubeconfig', variable: 'KUBECONFIG')]) {
            sh 'envsubst < webapi.yaml | kubectl apply -f -'
          }
        }
      }
    }
  }
}
```
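The `envsubst < webapi.yaml` step implies a manifest template in the repository whose image field references the same environment variables. The original post does not include it; a minimal sketch of what such a webapi.yaml could look like (the namespace, labels, and port are illustrative, not from the original):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ${APP_NAME}
  namespace: demo                 # hypothetical target namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ${APP_NAME}
  template:
    metadata:
      labels:
        app: ${APP_NAME}
    spec:
      imagePullSecrets:
      - name: harborsecret
      containers:
      - name: ${APP_NAME}
        # envsubst replaces these variables with the values exported in the pipeline run
        image: ${REGISTRY}/${HARBOR_NAMESPACE}/${APP_NAME}:${BUILD_NUMBER}
        ports:
        - containerPort: 80
```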
5. Dockerfile for the .NET Core build environment
```dockerfile
FROM registry.cn-beijing.aliyuncs.com/kubesphereio/builder-base:v3.2.2-podman

RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
        -e 's|^#baseurl=http://mirror.centos.org/centos|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
        -i.bak \
        /etc/yum.repos.d/CentOS-*.repo && \
    rpm -Uvh https://packages.microsoft.com/config/centos/7/packages-microsoft-prod.rpm && \
    yum install -y dotnet-sdk-3.1 && \
    yum install -y dotnet-sdk-5.0 && \
    yum install -y dotnet-sdk-6.0 && \
    yum clean all

#RUN dotnet tool install --global dotnet-sonarscanner --version 5.0.4
#ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/bin/sonar-scanner-3.3.0.1492-linux/bin:/root/.nuget/tools:/root/.dotnet/tools
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/.nuget/tools:/root/.dotnet/tools

CMD ["dotnet", "--list-sdks"]
```
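For this image to be usable by the agent template above, it has to be built and pushed to the private registry with the same tag that jenkins_user.yaml references. Something along these lines (docker is used here for illustration; podman build/push works the same way):

```bash
# Build the agent image and push it to Harbor (tag must match the agent template)
docker build -t harbor.emergen.cn/library/dotnet-emergen:3-5-6 .
docker login harbor.emergen.cn
docker push harbor.emergen.cn/library/dotnet-emergen:3-5-6
```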
6. Personal understanding
Running a pipeline starts a pod, and that pod uses the images defined for the label referenced in agent.node.label; this is how the Jenkinsfile ties back to the jenkins_user.yaml described at the beginning of this document.
If a script is not wrapped in a container(){} block, it runs in the pod's base container instead. The containers appear to share files with each other through a shared workspace volume, which is why output produced in one container is visible in the others; see the sketch below.
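A small illustration of that behaviour (my own sketch, not from the original post): both steps operate on the same workspace, but only the second one runs inside the dotnetcore container:

```groovy
pipeline {
  agent { node { label 'dotnetcore' } }
  stages {
    stage('Shared workspace demo') {
      steps {
        // Runs in the pod's base/jnlp container, writing into the shared workspace
        sh 'echo hello > from-base.txt'
        container('dotnetcore') {
          // Runs in the dotnetcore container, yet sees the file created above
          sh 'cat from-base.txt && dotnet --list-sdks'
        }
      }
    }
  }
}
```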