
k8s v1.20.10: 3 master + 2 worker nodes (Keepalived + HAproxy high availability)



Lab environment

Host network and component information

K8s cluster role IP Hostname Installed components
VIP 192.168.0.15 VIP VIP
MASTER 192.168.0.11 k8s-master-1 apiserver, controller-manager, scheduler, etcd, docker, kubectl, kubelet, kube-proxy, calico, coredns, metric-server, keepalived
MASTER 192.168.0.12 k8s-master-2 apiserver, controller-manager, scheduler, etcd, docker, kubectl, kubelet, kube-proxy, calico, coredns, metric-server, keepalived
MASTER 192.168.0.13 k8s-master-3 apiserver, controller-manager, scheduler, etcd, docker, kubectl, kubelet, kube-proxy, calico, coredns, metric-server, keepalived
WORKER 192.168.0.21 k8s-node-1 kubelet, kube-proxy, docker, calico
WORKER 192.168.0.22 k8s-node-2 kubelet, kube-proxy, docker, calico

Note: normally the master nodes only handle scheduling and do not run kube-proxy, calico, coredns, or metric-server. To save resources, the masters in this lab also act as worker nodes.

# OS version: CentOS 7.9 (kernel 4.19.12-1.el7.elrepo.x86_64)
# VM sizing: 4 GB RAM / 2 vCPU / 70 GB disk, virtualization enabled, NAT network mode
# Component versions
k8s-server & k8s-node (apiserver, kubectl, kube-scheduler, kube-proxy) 1.20.10
etcd 3.5.0
pause: v3.6
calico/node: v3.20.1
calico/pod2daemon-flexvol: v3.20.1
calico/cni: v3.20.1
coredns/coredns: v1.7.0
docker: 20.10.8
metric-server: v0.4.1
dashboard: v2.3.1
# Networks
service: 10.0.0.0/16
pod: 10.70.0.0/16

Host certificate information

Three CA authorities are used: one for the apiserver, one for etcd, and one for the API aggregation layer (sharing the apiserver CA would cause conflicts, so it gets its own CA). The issuing CAs are ca-apiserver, ca-etcd, and front-proxy-ca respectively.
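For orientation, a rough sketch of which CA signs which leaf certificates in the steps that follow (file names as used later in this guide):

# etcd-ca.pem            -> etcd.pem (etcd server/peer/client certificate)
# kube-apiserver-ca.pem  -> kube-apiserver.pem, admin.pem, kube-controller-manager.pem,
#                           kube-scheduler.pem, kube-proxy.pem, plus kubelet certs issued via bootstrap
# front-proxy-ca.pem     -> front-proxy-client.pem (API aggregation layer)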

Host initialization

Set hostnames

# k8s-master-1
hostnamectl set-hostname k8s-master-1 && bash
# k8s-master-2
hostnamectl set-hostname k8s-master-2 && bash
# k8s-master-3
hostnamectl set-hostname k8s-master-3 && bash
# k8s-node-1
hostnamectl set-hostname k8s-node-1 && bash
# k8s-node-2
hostnamectl set-hostname k8s-node-2 && bash

Configure the hosts file

# On all five master and node machines
cat <<EOF>>/etc/hosts
192.168.0.11 k8s-master-1
192.168.0.12 k8s-master-2
192.168.0.13 k8s-master-3
192.168.0.21 k8s-node-1
192.168.0.22 k8s-node-2
EOF

Passwordless SSH login

# For simplicity, all five machines share the same public/private key pair for passwordless login
[root@k8s-master-1 ~]# ssh-keygen -t rsa
[root@k8s-master-1 ~]# cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
[root@k8s-master-1 ~]# chmod 600 /root/.ssh/authorized_keys
[root@k8s-master-1 ~]# scp -r /root/.ssh root@k8s-master-2:/root
[root@k8s-master-1 ~]# scp -r /root/.ssh root@k8s-master-3:/root
[root@k8s-master-1 ~]# scp -r /root/.ssh root@k8s-node-1:/root
[root@k8s-master-1 ~]# scp -r /root/.ssh root@k8s-node-2:/root

Disable the firewall

# master,node
# Disable the firewall
systemctl disable firewalld --now
# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
setenforce 0

Disable swap

# master,node
swapoff -a && sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab

Configure yum repositories

# master,node
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
# Install base dependencies
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate

Configure time synchronization

# master,node
# Sync time
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo 'Asia/Shanghai' >/etc/timezone
ntpdate time2.aliyun.com
# Add to crontab
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com

Kernel upgrade

# master,node
# Update the system
yum update -y --exclude=kernel*
# Upload kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm and kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm to all five nodes
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kernel-ml-4.19.12-1.el7.elrepo.x86_64.rpm kernel-ml-devel-4.19.12-1.el7.elrepo.x86_64.rpm root@$i:/root ;done
# Install the kernel
yum localinstall -y kernel-ml*
# On all nodes, change the kernel boot order and enable user namespaces in the kernel
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --args="user_namespace.enable=1" --update-kernel="$(grubby --default-kernel)"
# Reboot all nodes, then check that the default kernel is 4.19
grubby --default-kernel

Tune kernel parameters

# Enable ipvs. Without ipvs, kube-proxy falls back to iptables for packet forwarding, which is less efficient, so upstream recommends enabling ipvs.
yum install -y ipvsadm ipset sysstat conntrack libseccomp
# On kernel 4.19+ the nf_conntrack_ipv4 module was renamed to nf_conntrack; on 4.18 and below use nf_conntrack_ipv4 instead
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
nf_net
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl enable --now systemd-modules-load.service
# master,node
# Append the following at the end
cat >> /etc/security/limits.conf <<EOF
* soft nofile 65536
* hard nofile 131072
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
# master,node
# Enable kernel parameters for k8s
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl --system
# After a reboot, check that the modules are loaded
[root@k8s-master-1 ~]# lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_ftp              16384  0
nf_nat                 32768  1 ip_vs_ftp
ip_vs_sed              16384  0
ip_vs_nq               16384  0
ip_vs_fo               16384  0
ip_vs_sh               16384  0
ip_vs_dh               16384  0
ip_vs_lblcr            16384  0
ip_vs_lblc             16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs_wlc              16384  0
ip_vs_lc               16384  0
ip_vs                 151552  24 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_lblcr,ip_vs_sh,ip_vs_fo,ip_vs_nq,ip_vs_lblc,ip_vs_wrr,ip_vs_lc,ip_vs_sed,ip_vs_ftp
nf_conntrack          143360  2 nf_nat,ip_vs
nf_defrag_ipv6         20480  1 nf_conntrack
nf_defrag_ipv4         16384  1 nf_conntrack
libcrc32c              16384  4 nf_conntrack,nf_nat,xfs,ip_vs

Install docker

# master,node
# Install docker-ce
yum install docker-ce.* -y
mkdir /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://ornb7jit.mirror.aliyuncs.com"],
  "default-ipc-mode": "shareable"
}
EOF
systemctl daemon-reload && systemctl enable --now docker

CA initialization

Notes:

  1. All certificates are generated on a master node and then distributed to the other nodes.
  2. etcd, apiserver, and apiaggregation each use their own CA here. Normally etcd, the apiserver, and the components that talk to the apiserver can share a single CA, with a separate CA for apiaggregation.
# Create the CA config file
cat > ca-config.json <<EOF
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "kubernetes": {
                "expiry": "87600h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
EOF

Annotations:

Field Description
signing Indicates the certificate can be used to sign other certificates; the generated ca.pem contains CA=TRUE
server auth Allows a client to use this certificate to verify the certificate presented by a server
client auth Allows a server to use this certificate to verify the certificate presented by a client
config.json Multiple profiles can be defined, each with its own expiry, usages, and other parameters; a specific profile is selected by name when signing certificates later
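For example, the later signing steps pick the kubernetes profile by name with -profile; a minimal sketch reusing the etcd files created below:

# -profile=kubernetes selects the "kubernetes" profile from ca-config.json
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem \
  -config=ca-config.json -profile=kubernetes \
  etcd-csr.json | cfssljson -bare etcd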

etcd-ca

# Create the CA signing request file
cat > etcd-ca-csr.json <<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "L": "hunan",
    "ST": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF

Annotations:

Field Description
hosts Left empty here, so any host may use the etcd-ca.pem certificate
CN Common Name; kube-apiserver extracts this field as the request's user name. Browsers use it to verify a site's legitimacy; for SSL certificates it is the domain being applied for
C Country of the applicant; a two-letter country code, e.g. CN for China
L Locality, i.e. the city
ST State or province
O Organization; kube-apiserver extracts this field as the group the requesting user belongs to; typically the company name
OU Organizational unit, i.e. the department name
# Generate the CA certificate
[root@k8s-master-1 pki]# cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
2022/04/16 12:45:47 [INFO] generating a new CA key and certificate from CSR
2022/04/16 12:45:47 [INFO] generate received request
2022/04/16 12:45:47 [INFO] received CSR
2022/04/16 12:45:47 [INFO] generating key: rsa-2048
2022/04/16 12:45:47 [INFO] encoded CSR
2022/04/16 12:45:47 [INFO] signed certificate with serial number 82530505131211927853480790880463068529989227777
# Check the generated files
[root@k8s-master-1 pki]# ls etcd*
etcd-ca.csr  etcd-ca-csr.json  etcd-ca-key.pem  etcd-ca.pem

Annotations:

  1. etcd-ca-key.pem is the generated private key.
  2. etcd-ca.pem is the generated CA certificate; it will be used to issue certificates later.
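To double-check that the generated file really is a CA certificate, it can be inspected with openssl (a minimal sketch; the exact extension text may vary slightly):

# Print the subject and the Basic Constraints extension; a CA certificate shows CA:TRUE
openssl x509 -in etcd-ca.pem -noout -subject
openssl x509 -in etcd-ca.pem -noout -text | grep -A1 "Basic Constraints"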

kube-apiserver-ca

# Create the CA signing request file
cat > kube-apiserver-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [{
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "k8s",
      "OU": "system"
    }]
}
EOF
# Generate the CA certificate
cfssl gencert -initca kube-apiserver-ca-csr.json | cfssljson -bare kube-apiserver-ca

apiaggregation-ca

# Create the CA signing request file
cat > front-proxy-ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [{
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "k8s",
      "OU": "system"
    }]
}
EOF
# Generate the CA certificate
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca

Deploy etcd

Kubernetes stores its data in etcd, so prepare an etcd database first. To avoid a single point of failure, etcd should run as a cluster: a 3-node cluster tolerates 1 node failure, and a 5-node cluster tolerates 2.
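This follows from the quorum rule: a cluster of n members stays available as long as floor(n/2)+1 members are healthy, so 3 nodes tolerate 1 failure and 5 nodes tolerate 2. Once the cluster built below is running, membership can be confirmed with etcdctl (a minimal sketch reusing the certificate paths configured later in this section):

etcdctl --cacert=/etc/etcd/ssl/etcd-ca.pem \
        --cert=/etc/etcd/ssl/etcd.pem \
        --key=/etc/etcd/ssl/etcd-key.pem \
        --endpoints="https://192.168.0.11:2379" \
        member list --write-out=table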

Create the etcd certificate

# The IPs in the hosts field are the internal cluster IPs of all etcd nodes; a few spare IPs can be reserved for future expansion
# Create the etcd certificate signing request
cat > etcd-csr.json<<EOF
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.11",
    "192.168.0.12",
    "192.168.0.13",
    "192.168.0.15"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the certificate
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

Create the etcd configuration files

Only the three MASTER nodes need this configuration.

# 创建相应文件夹mkdir -p /etc/etcd/sslmkdir -p /var/lib/etcd/default.etcd# 发送etcd命令for i in k8s-master-{1..3}; do scp etcd etcdctl etcdutl root@$i:/usr/bin; done# k8s-master-1 创建etcd配置文件cat <<EOF>/etc/etcd/etcd.confname: 'k8s-master-1'data-dir: /var/lib/etcdwal-dir: /var/lib/etcd/walsnapshot-count: 5000heartbeat-interval: 100election-timeout: 1000quota-backend-bytes: 0listen-peer-urls: 'https://192.168.0.11:2380'listen-client-urls: 'https://192.168.0.11:2379,http://127.0.0.1:2379'max-snapshots: 3max-wals: 5cors:initial-advertise-peer-urls: 'https://192.168.0.11:2380'advertise-client-urls: 'https://192.168.0.11:2379'discovery:discovery-fallback: 'proxy'discovery-proxy:discovery-srv:initial-cluster: 'k8s-master-1=https://192.168.0.11:2380,k8s-master-2=https://192.168.0.12:2380,k8s-master-3=https://192.168.0.13:2380'initial-cluster-token: 'etcd-k8s-cluster'initial-cluster-state: 'new'strict-reconfig-check: falseenable-v2: trueenable-pprof: trueproxy: 'off'proxy-failure-wait: 5000proxy-refresh-interval: 30000proxy-dial-timeout: 1000proxy-write-timeout: 5000proxy-read-timeout: 0client-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truepeer-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  peer-client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truedebug: falselog-package-levels:log-outputs: [default]force-new-cluster: falseEOF# k8s-master-2 创建etcd配置文件cat <<EOF>/etc/etcd/etcd.confname: 'k8s-master-2'data-dir: /var/lib/etcdwal-dir: /var/lib/etcd/walsnapshot-count: 5000heartbeat-interval: 100election-timeout: 1000quota-backend-bytes: 0listen-peer-urls: 'https://192.168.0.12:2380'listen-client-urls: 'https://192.168.0.12:2379,http://127.0.0.1:2379'max-snapshots: 3max-wals: 5cors:initial-advertise-peer-urls: 'https://192.168.0.12:2380'advertise-client-urls: 'https://192.168.0.12:2379'discovery:discovery-fallback: 'proxy'discovery-proxy:discovery-srv:initial-cluster: 'k8s-master-1=https://192.168.0.11:2380,k8s-master-2=https://192.168.0.12:2380,k8s-master-3=https://192.168.0.13:2380'initial-cluster-token: 'etcd-k8s-cluster'initial-cluster-state: 'new'strict-reconfig-check: falseenable-v2: trueenable-pprof: trueproxy: 'off'proxy-failure-wait: 5000proxy-refresh-interval: 30000proxy-dial-timeout: 1000proxy-write-timeout: 5000proxy-read-timeout: 0client-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truepeer-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  peer-client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truedebug: falselog-package-levels:log-outputs: [default]force-new-cluster: falseEOF# k8s-master-3 创建etcd配置文件cat <<EOF>/etc/etcd/etcd.confname: 'k8s-master-3'data-dir: /var/lib/etcdwal-dir: /var/lib/etcd/walsnapshot-count: 5000heartbeat-interval: 100election-timeout: 1000quota-backend-bytes: 0listen-peer-urls: 'https://192.168.0.13:2380'listen-client-urls: 'https://192.168.0.13:2379,http://127.0.0.1:2379'max-snapshots: 3max-wals: 5cors:initial-advertise-peer-urls: 'https://192.168.0.13:2380'advertise-client-urls: 'https://192.168.0.13:2379'discovery:discovery-fallback: 'proxy'discovery-proxy:discovery-srv:initial-cluster: 
'k8s-master-1=https://192.168.0.11:2380,k8s-master-2=https://192.168.0.12:2380,k8s-master-3=https://192.168.0.13:2380'initial-cluster-token: 'etcd-k8s-cluster'initial-cluster-state: 'new'strict-reconfig-check: falseenable-v2: trueenable-pprof: trueproxy: 'off'proxy-failure-wait: 5000proxy-refresh-interval: 30000proxy-dial-timeout: 1000proxy-write-timeout: 5000proxy-read-timeout: 0client-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truepeer-transport-security:  cert-file: '/etc/etcd/ssl/etcd.pem'  key-file: '/etc/etcd/ssl/etcd-key.pem'  peer-client-cert-auth: true  trusted-ca-file: '/etc/etcd/ssl/etcd-ca.pem'  auto-tls: truedebug: falselog-package-levels:log-outputs: [default]force-new-cluster: falseEOF# 创建启动服务文件cat<<EOF> /usr/lib/systemd/system/etcd.service[Unit]Description=Etcd ServiceDocumentation=https://coreos.com/etcd/docs/latest/After=network.target[Service]Type=notifyExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.confRestart=on-failureRestartSec=10LimitNOFILE=65536[Install]WantedBy=multi-user.targetAlias=etcd3.serviceEOF# 将证书移动到相应位置for i in k8s-master-{1..3}; do scp etcd.pem etcd-key.pem etcd-ca.pem root@$i:/etc/etcd/ssl; done# 启动etcdsystemctl daemon-reload && systemctl enable etcd --now # 查看etcd集群状态[root@k8s-master-1 pki]# export ETCDCTL_API=3[root@k8s-master-1 pki]# etcdctl --write-out=table --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints="https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379"  endpoint health+---------------------------+--------+-------------+-------+|  ENDPOINT   | HEALTH |    TOOK     | ERROR |+---------------------------+--------+-------------+-------+| https://192.168.0.13:2379 |   true | 12.723101ms ||| https://192.168.0.11:2379 |   true | 12.450365ms ||| https://192.168.0.12:2379 |   true | 28.175045ms ||+---------------------------+--------+-------------+-------+[root@k8s-master-1 pki]#  etcdctl --write-out=table --cacert=/etc/etcd/ssl/etcd-ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints="https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379"  endpoint status+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+-|  ENDPOINT   | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+-| https://192.168.0.11:2379 | 8e6cfa29594e45ef |   3.5.0 |   20 kB |     false |      false |  2 |  17 |   17 | || https://192.168.0.12:2379 | e3e3021ea0428114 |   3.5.0 |   29 kB |     false |      false |  2 |  17 |   17 | || https://192.168.0.13:2379 | ac9ec362c31e5fdd |   3.5.0 |   20 kB |      true |      false |  2 |  17 |   17 | |+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+-

Deploy the apiserver

Upload the k8s components

# Upload the kubernetes-server binaries (master)
for i in k8s-master-{1..3}; do scp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy root@$i:/usr/bin; done
for i in k8s-node-{1..2}; do scp kube-proxy kubelet root@$i:/usr/bin; done
# Create the related directories (master,node)
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes

Create the token.csv file

# Format: token,user name,UID,user group; the kubelet-bootstrap user must be trusted by the api-server
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
# The system:kubelet-bootstrap group is built in

Note: token.csv is used later to automatically issue certificates to kubelets.

Create the apiserver certificate

# Create the apiserver certificate signing request
cat > kube-apiserver-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.0.11",
    "192.168.0.12",
    "192.168.0.13",
    "192.168.0.15",
    "10.0.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "k8s",
      "OU": "system"
    }
  ]
}
EOF
# hosts: list the IPs/VIP of the hosts running the apiserver plus the first IP of the service network; the remaining entries can be copied as above.
# Node IPs do not need to be listed because their certificates are issued automatically through the bootstrap mechanism.
# In general, the hosts field contains all Master/LB/VIP IPs.
# Generate the certificate
cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver

Create the apiaggregation certificate

# Create the apiaggregation certificate signing request
cat > front-proxy-client-csr.json <<EOF
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "hunan",
    "L": "changsha",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF
# Generate the certificate
cfssl gencert -ca=front-proxy-ca.pem -ca-key=front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client

Create the service account key pair

# Generate the private key
openssl genrsa -out ./service.key 2048
# Generate the public key
openssl rsa -in ./service.key -pubout -out ./service.pub

Note: this key pair is mainly used for service accounts (signing and verifying service account tokens).
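A quick way to confirm the two files belong together is to re-derive the public key from the private key and compare it with service.pub (a minimal sketch):

# The diff is empty if service.pub was derived from service.key
openssl rsa -in ./service.key -pubout 2>/dev/null | diff - ./service.pub && echo "service.key and service.pub match"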

Create the apiserver configuration files

#  k8s-master-1创建apiserver配置文件cat > /usr/lib/systemd/system/kube-apiserver.service <<"EOF"[Unit]Description=Kubernetes API ServerDocumentation=https://github.com/kubernetes/kubernetesAfter=etcd.serviceWants=etcd.service [Service]ExecStart=/usr/bin/kube-apiserver \    --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \    --anonymous-auth=false \    --bind-address=0.0.0.0 \    --secure-port=6443 \    --advertise-address=192.168.0.11 \    --insecure-port=0 \    --authorization-mode=Node,RBAC \    --runtime-config=api/all=true \    --enable-bootstrap-token-auth \    --enable-aggregator-routing=true \    --feature-gates=EphemeralContainers=true \    --token-auth-file=/etc/kubernetes/token.csv \    --service-cluster-ip-range=10.0.0.0/16 \    --service-node-port-range=30000-50000 \    --service-account-key-file=/etc/kubernetes/ssl/service.pub \    --service-account-signing-key-file=/etc/kubernetes/ssl/service.key \    --service-account-issuer=https://kubernetes.default.svc.cluster.local \    --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \    --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --client-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \    --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \    --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \    --etcd-certfile=/etc/etcd/ssl/etcd.pem \    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \    --etcd-servers=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \    --requestheader-allowed-names=front-proxy-client   \    --requestheader-extra-headers-prefix=X-Remote-Extra-  \    --requestheader-group-headers=X-Remote-Group     \    --requestheader-username-headers=X-Remote-User   \    --proxy-client-cert-file=/etc/kubernetes/ssl/front-proxy-client.pem  \    --proxy-client-key-file=/etc/kubernetes/ssl/front-proxy-client-key.pem    \    --enable-swagger-ui=true \    --allow-privileged=true \    --apiserver-count=1 \    --audit-log-maxage=30 \    --audit-log-maxbackup=3 \    --audit-log-maxsize=100 \    --audit-log-path=/var/log/kube-apiserver-audit.log \    --event-ttl=1h \    --alsologtostderr=true \    --logtostderr=false \    --log-dir=/var/log/kubernetes \    --v=2    Restart=on-failureRestartSec=5Type=notifyLimitNOFILE=65536 [Install]WantedBy=multi-user.targetEOF#  k8s-master-2创建apiserver配置文件cat > /usr/lib/systemd/system/kube-apiserver.service <<"EOF"[Unit]Description=Kubernetes API ServerDocumentation=https://github.com/kubernetes/kubernetesAfter=etcd.serviceWants=etcd.service [Service]ExecStart=/usr/bin/kube-apiserver \    --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \    --anonymous-auth=false \    --bind-address=0.0.0.0 \    --secure-port=6443 \    --advertise-address=192.168.0.12 \    --insecure-port=0 \    --authorization-mode=Node,RBAC \    --runtime-config=api/all=true \    --enable-bootstrap-token-auth \    --enable-aggregator-routing=true \    --feature-gates=EphemeralContainers=true \    --token-auth-file=/etc/kubernetes/token.csv \    --service-cluster-ip-range=10.0.0.0/16 \    --service-node-port-range=30000-50000 \    
--service-account-key-file=/etc/kubernetes/ssl/service.pub \    --service-account-signing-key-file=/etc/kubernetes/ssl/service.key \    --service-account-issuer=https://kubernetes.default.svc.cluster.local \    --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \    --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --client-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \    --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \    --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \    --etcd-certfile=/etc/etcd/ssl/etcd.pem \    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \    --etcd-servers=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \    --requestheader-allowed-names=front-proxy-client   \    --requestheader-extra-headers-prefix=X-Remote-Extra-  \    --requestheader-group-headers=X-Remote-Group     \    --requestheader-username-headers=X-Remote-User   \    --proxy-client-cert-file=/etc/kubernetes/ssl/front-proxy-client.pem  \    --proxy-client-key-file=/etc/kubernetes/ssl/front-proxy-client-key.pem    \    --enable-swagger-ui=true \    --allow-privileged=true \    --apiserver-count=1 \    --audit-log-maxage=30 \    --audit-log-maxbackup=3 \    --audit-log-maxsize=100 \    --audit-log-path=/var/log/kube-apiserver-audit.log \    --event-ttl=1h \    --alsologtostderr=true \    --logtostderr=false \    --log-dir=/var/log/kubernetes \    --v=2    Restart=on-failureRestartSec=5Type=notifyLimitNOFILE=65536 [Install]WantedBy=multi-user.targetEOF#  k8s-master-3创建apiserver配置文件cat > /usr/lib/systemd/system/kube-apiserver.service <<"EOF"[Unit]Description=Kubernetes API ServerDocumentation=https://github.com/kubernetes/kubernetesAfter=etcd.serviceWants=etcd.service [Service]ExecStart=/usr/bin/kube-apiserver \    --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \    --anonymous-auth=false \    --bind-address=0.0.0.0 \    --secure-port=6443 \    --advertise-address=192.168.0.13 \    --insecure-port=0 \    --authorization-mode=Node,RBAC \    --runtime-config=api/all=true \    --enable-bootstrap-token-auth \    --enable-aggregator-routing=true \    --feature-gates=EphemeralContainers=true \    --token-auth-file=/etc/kubernetes/token.csv \    --service-cluster-ip-range=10.0.0.0/16 \    --service-node-port-range=30000-50000 \    --service-account-key-file=/etc/kubernetes/ssl/service.pub \    --service-account-signing-key-file=/etc/kubernetes/ssl/service.key \    --service-account-issuer=https://kubernetes.default.svc.cluster.local \    --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem  \    --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --client-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \    --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \    --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \    --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \    --etcd-certfile=/etc/etcd/ssl/etcd.pem \    --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \    --etcd-servers=https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.0.13:2379 \    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \    --requestheader-allowed-names=front-proxy-client   \    
--requestheader-extra-headers-prefix=X-Remote-Extra-  \    --requestheader-group-headers=X-Remote-Group     \    --requestheader-username-headers=X-Remote-User   \    --proxy-client-cert-file=/etc/kubernetes/ssl/front-proxy-client.pem  \    --proxy-client-key-file=/etc/kubernetes/ssl/front-proxy-client-key.pem    \    --enable-swagger-ui=true \    --allow-privileged=true \    --apiserver-count=1 \    --audit-log-maxage=30 \    --audit-log-maxbackup=3 \    --audit-log-maxsize=100 \    --audit-log-path=/var/log/kube-apiserver-audit.log \    --event-ttl=1h \    --alsologtostderr=true \    --logtostderr=false \    --log-dir=/var/log/kubernetes \    --v=2    Restart=on-failureRestartSec=5Type=notifyLimitNOFILE=65536 [Install]WantedBy=multi-user.targetEOF# 复制证书到相应目录for i in k8s-master-{1..3}; do scp service.pub service.key kube-apiserver.pem kube-apiserver-key.pem kube-apiserver-ca.pem kube-apiserver-ca-key.pem front-proxy-client.pem front-proxy-client-key.pem front-proxy-ca.pem root@$i:/etc/kubernetes/ssl/; donefor i in k8s-master-{1..3}; do scp token.csv root@$i:/etc/kubernetes; done# 启动systemctl daemon-reload systemctl enable kube-apiserver.service --now# 检查是否正常运行systemctl status kube-apiserver# 不携带证书访问[root@k8s-master-1 pki]# curl -k https://192.168.0.11:6443{  "kind": "Status",  "apiVersion": "v1",  "metadata": {      },  "status": "Failure",  "message": "Unauthorized",  "reason": "Unauthorized",  "code": 401}

Deploy keepalived + HAproxy

Deploy HAproxy

# Install HAproxy on the MASTER nodes
yum -y install haproxy
# HAproxy configuration
cat <<EOF > /etc/haproxy/haproxy.cfg
global
  maxconn  2000
  ulimit-n  16384
  log  127.0.0.1 local0 err
  stats timeout 30s
defaults
  log global
  mode  http
  option  httplog
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s
frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master-1    192.168.0.11:6443  check
  server k8s-master-2    192.168.0.12:6443  check
  server k8s-master-3    192.168.0.13:6443  check
EOF
# Start the haproxy service
systemctl enable haproxy --now

Deploy keepalived

# MASTER节点安装keepalivedyum install -y keepalived# k8s-master-1配置keepalivedcat <<EOF>/etc/keepalived/keepalived.conf! Configuration File for keepalivedglobal_defs {    router_id LVS_DEVEL}vrrp_script chk_apiserver {    script "/etc/keepalived/check_apiserver.sh"    interval 5     weight -5    fall 2    rise 1}vrrp_instance VI_1 {    state BACKUP    interface ens33    mcast_src_ip 192.168.0.11    virtual_router_id 51    priority 100    nopreempt    advert_int 2    authentication { auth_type PASS auth_pass K8SHA_KA_AUTH    }    virtual_ipaddress { 192.168.0.15    }    track_script {      chk_apiserver     } }EOF# k8s-master-2配置keepalivedcat <<EOF>/etc/keepalived/keepalived.conf! Configuration File for keepalivedglobal_defs {    router_id LVS_DEVEL}vrrp_script chk_apiserver {    script "/etc/keepalived/check_apiserver.sh"    interval 5     weight -5    fall 2    rise 1}vrrp_instance VI_1 {    state BACKUP    interface ens33    mcast_src_ip 192.168.0.12    virtual_router_id 51    priority 100    nopreempt    advert_int 2    authentication { auth_type PASS auth_pass K8SHA_KA_AUTH    }    virtual_ipaddress { 192.168.0.15    }    track_script {      chk_apiserver     } }EOF# k8s-master-3配置keepalivedcat <<EOF>/etc/keepalived/keepalived.conf! Configuration File for keepalivedglobal_defs {    router_id LVS_DEVEL}vrrp_script chk_apiserver {    script "/etc/keepalived/check_apiserver.sh"    interval 5     weight -5    fall 2    rise 1}vrrp_instance VI_1 {    state BACKUP    interface ens33    mcast_src_ip 192.168.0.13    virtual_router_id 51    priority 100    nopreempt    advert_int 2    authentication { auth_type PASS auth_pass K8SHA_KA_AUTH    }    virtual_ipaddress { 192.168.0.15    }    track_script {      chk_apiserver     } }EOF# 配置检查脚本 /etc/keepalived/check_apiserver.sh#!/bin/bashERROR=0for i in $(seq 1 4)doif ! ss -tunlp | grep kube-apiserver &>/dev/null ; thenERROR=$(expr $ERROR+1)sleep 1continueelseERROR=0breakfidoneif [ $ERROR == "0" ]; thenexit 0else/usr/bin/systemctl stop keepalivedexit 1fi# 设置xchmod a+x /etc/keepalived/check_apiserver.sh# 启动keepalivedsystemctl enable keepalived --now# 访问测试[root@k8s-master-1 pki]# curl -k https://192.168.0.15:16443{  "kind": "Status",  "apiVersion": "v1",  "metadata": {      },  "status": "Failure",  "message": "Unauthorized",  "reason": "Unauthorized",  "code": 401}

Deploy kubectl

Create the kubectl certificate

# Create the kubectl certificate signing request
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Copy the certificates into place
for i in k8s-master-{1..3}; do scp admin*.pem root@$i:/etc/kubernetes/ssl; done

Annotations:

  1. cluster-admin (a built-in role with the broadest permissions): the Group system:masters is bound to the Role cluster-admin, which grants access to every kube-apiserver API.
  2. O sets the certificate's Group to system:masters. It must be system:masters, otherwise the later kubectl create clusterrolebinding commands will fail.
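This built-in binding can be confirmed directly once kubectl works against the cluster (a minimal sketch; output omitted):

# cluster-admin should list Group system:masters as a subject
kubectl get clusterrolebinding cluster-admin -o wide
kubectl describe clusterrolebinding cluster-admin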

Create the kubeconfig file for kubectl

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.15:16443 --kubeconfig=kube.config
# Set client credentials
kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
# Set the context
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
# Use the context as default
kubectl config use-context kubernetes --kubeconfig=kube.config
# Copy to the MASTER nodes
for i in k8s-master-{1..3}; do ssh root@$i 'mkdir -p ~/.kube'; done
for i in k8s-master-{1..3}; do scp kube.config root@$i:/root/.kube/config; done
# Check the svc
[root@k8s-master-1 ssl]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP   52m
# Authorize the apiserver user (declared in the CN field of the apiserver certificate) to access kubelets; the apiserver needs to talk to kubelets later
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
# kubectl create clusterrolebinding kubernetes --clusterrole=cluster-admin --user=kubernetes
clusterrolebinding.rbac.authorization.k8s.io/kubernetes created

Deploy kube-controller-manager

Create the kube-controller-manager certificate

# Create the kube-controller-manager certificate signing request
cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
      "127.0.0.1",
      "192.168.0.11",
      "192.168.0.12",
      "192.168.0.13",
      "192.168.0.15"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "hunan",
        "L": "changsha",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
EOF
# Generate the certificate
cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

Annotations:

  1. CN is system:kube-controller-manager; the built-in ClusterRoleBinding system:kube-controller-manager grants kube-controller-manager the permissions it needs.
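The built-in binding and its role can be inspected for reference (a minimal sketch; output omitted):

kubectl get clusterrolebinding system:kube-controller-manager -o wide
kubectl describe clusterrole system:kube-controller-manager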

Create the kubeconfig for kube-controller-manager

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.15:16443 --kubeconfig=kube-controller-manager.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig
# Set the context
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
# Use the context as default
kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

Create the kube-controller-manager configuration file

# 创建kube-controller-manager启动配置文件cat > kube-controller-manager.service <<"EOF"[Unit]      Description=Kubernetes Controller ManagerDocumentation=https://github.com/kubernetes/kubernetes[Service]      ExecStart=/usr/bin/kube-controller-manager \    --port=10252 \    --secure-port=10257 \    --bind-address=127.0.0.1 \    --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \    --service-cluster-ip-range=10.0.0.0/16 \    --cluster-name=kubernetes \    --cluster-signing-cert-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \    --cluster-signing-key-file=/etc/kubernetes/ssl/kube-apiserver-ca-key.pem \    --cluster-signing-duration=87600h \    --allocate-node-cidrs=true \    --cluster-cidr=10.70.0.0/16 \    --node-cidr-mask-size=24 \    --root-ca-file=/etc/kubernetes/ssl/kube-apiserver-ca.pem \    --service-account-private-key-file=/etc/kubernetes/ssl/service.key \    --use-service-account-credentials=true \    --leader-elect=true \    --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true,EphemeralContainers=true \    --controllers=*,bootstrapsigner,tokencleaner \    --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \    --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \    --requestheader-allowed-names=front-proxy-client   \    --requestheader-extra-headers-prefix=X-Remote-Extra-  \    --requestheader-group-headers=X-Remote-Group     \    --requestheader-username-headers=X-Remote-User   \    --horizontal-pod-autoscaler-use-rest-clients=true \    --alsologtostderr=true \    --logtostderr=false \    --log-dir=/var/log/kubernetes \    --v=2      Restart=on-failureRestartSec=5   [Install]      WantedBy=multi-user.targetEOF# 复制文件for i in k8s-master-{1..3}; do scp kube-controller-manager*.pem root@$i:/etc/kubernetes/ssl; donefor i in k8s-master-{1..3}; do scp kube-controller-manager.service root@$i:/usr/lib/systemd/system/; donefor i in k8s-master-{1..3}; do scp kube-controller-manager.kubeconfig root@$i:/etc/kubernetes; done# 启动服务systemctl daemon-reloadsystemctl enable kube-controller-manager --now# 检查kube-controller-manager运行状态[root@k8s-master-3 ~]# kubectl get csWarning: v1 ComponentStatus is deprecated in v1.19+NAME   STATUS      MESSAGE   ERRORscheduler     Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   controller-manager   Healthy     ok etcd-2 Healthy     {"health":"true","reason":""}  etcd-0 Healthy     {"health":"true","reason":""}  etcd-1 Healthy     {"health":"true","reason":""}  

Deploy kube-scheduler

Create the kube-scheduler certificate

# Create the certificate signing request
cat > kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.0.11",
      "192.168.0.12",
      "192.168.0.13",
      "192.168.0.15"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "hunan",
        "L": "changsha",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
EOF
# Note: O is system:kube-scheduler; the built-in ClusterRoleBinding system:kube-scheduler grants kube-scheduler the permissions it needs
# Generate the certificate
cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

Create the kubeconfig for kube-scheduler

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.15:16443 --kubeconfig=kube-scheduler.kubeconfig
# Set client credentials
kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig
# Set the context
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
# Use the context as default
kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

Create the kube-scheduler configuration file

cat > kube-scheduler.service <<"EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/bin/kube-scheduler \
    --bind-address=127.0.0.1 \
    --port=10251 \
    --secure-port=10259 \
    --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
    --feature-gates=EphemeralContainers=true \
    --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem \
    --requestheader-allowed-names=front-proxy-client \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-username-headers=X-Remote-User \
    --leader-elect=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Copy the files
for i in k8s-master-{1..3}; do scp kube-scheduler*.pem root@$i:/etc/kubernetes/ssl; done
for i in k8s-master-{1..3}; do scp kube-scheduler.service root@$i:/usr/lib/systemd/system; done
for i in k8s-master-{1..3}; do scp kube-scheduler.kubeconfig root@$i:/etc/kubernetes; done
# Start the service
systemctl daemon-reload
systemctl enable kube-scheduler.service --now
# Check the service status
[root@k8s-master-1 pki]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-1               Healthy   {"health":"true","reason":""}
etcd-2               Healthy   {"health":"true","reason":""}
etcd-0               Healthy   {"health":"true","reason":""}

Deploy kubelet

Note:

Because the master nodes in this guide also run system components such as calico and coredns (as pods), kubelet and kube-proxy must be deployed on the masters as well.
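Once the cluster is up, this can be confirmed by checking which nodes the kube-system pods land on (a minimal sketch; output omitted):

kubectl get pods -n kube-system -o wide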

# Extract the token
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

Create the kubeconfig for kubelet

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.15:16443 --kubeconfig=kubelet-bootstrap.kubeconfig
# Set client credentials
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
# Set the context
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
# Use the context as default
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig

Create the kubelet configuration file

cat <<EOF>kubelet.jsonapiVersion: kubelet.config.k8s.io/v1beta1kind: KubeletConfigurationaddress: 0.0.0.0port: 10250readOnlyPort: 10255authentication:  anonymous:    enabled: false  webhook:    cacheTTL: 2m0s    enabled: true  x509:    clientCAFile: /etc/kubernetes/ssl/kube-apiserver-ca.pemauthorization:  mode: Webhook  webhook:    cacheAuthorizedTTL: 5m0s    cacheUnauthorizedTTL: 30scgroupDriver: systemdcgroupsPerQOS: trueclusterDNS:- 10.0.0.10clusterDomain: cluster.localcontainerLogMaxFiles: 5containerLogMaxSize: 10MicontentType: application/vnd.kubernetes.protobufcpuCFSQuota: truecpuManagerPolicy: nonecpuManagerReconcilePeriod: 10senableControllerAttachDetach: trueenableDebuggingHandlers: trueenforceNodeAllocatable:- podseventBurst: 10eventRecordQPS: 5evictionHard:  imagefs.available: 15%  memory.available: 100Mi  nodefs.available: 10%  nodefs.inodesFree: 5%evictionPressureTransitionPeriod: 5m0sfailSwapOn: truefileCheckFrequency: 20shairpinMode: promiscuous-bridgehealthzBindAddress: 127.0.0.1healthzPort: 10248httpCheckFrequency: 20simageGCHighThresholdPercent: 85imageGCLowThresholdPercent: 80imageMinimumGCAge: 2m0siptablesDropBit: 15iptablesMasqueradeBit: 14kubeAPIBurst: 10kubeAPIQPS: 5makeIPTablesUtilChains: truemaxOpenFiles: 1000000maxPods: 110nodeStatusUpdateFrequency: 10soomScoreAdj: -999podPidsLimit: -1registryBurst: 10registryPullQPS: 5resolvConf: /etc/resolv.confrotateCertificates: trueruntimeRequestTimeout: 2m0sserializeImagePulls: truestaticPodPath: /etc/kubernetes/manifestsstreamingConnectionIdleTimeout: 4h0m0ssyncFrequency: 1m0svolumeStatsAggPeriod: 1m0sEOF

Create the kubelet service file

# Create the kubelet service unit (master nodes)
cat > kubelet.service<<"EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/bin/kubelet \
    --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
    --cert-dir=/etc/kubernetes/ssl \
    --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
    --feature-gates=EphemeralContainers=true,RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
    --config=/etc/kubernetes/kubelet.json \
    --network-plugin=cni \
    --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.6 \
    --rotate-certificates=true \
    --alsologtostderr=true \
    --logtostderr=false \
    --log-dir=/var/log/kubernetes \
    --v=2
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Create the directories
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
# Distribute the related files
for i in k8s-node-{1..2}; do scp kube-apiserver-ca.pem root@$i:/etc/kubernetes/ssl; done
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kubelet.service root@$i:/usr/lib/systemd/system; done
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kubelet-bootstrap.kubeconfig root@$i:/etc/kubernetes/; done
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kubelet.json root@$i:/etc/kubernetes/; done
# Start the service
systemctl daemon-reload
systemctl enable kubelet --now

Create RBAC rules to auto-approve CSRs

The apiserver automatically creates two ClusterRoles, listed below (a quick check follows the list):

  1. system:certificates.k8s.io:certificatesigningrequests:nodeclient
  2. system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
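These built-in ClusterRoles can be confirmed before adding a third one (a minimal sketch; output omitted):

kubectl get clusterrole system:certificates.k8s.io:certificatesigningrequests:nodeclient
kubectl get clusterrole system:certificates.k8s.io:certificatesigningrequests:selfnodeclient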
# 我们再增加一条cat <<EOF | kubectl apply -f -kind: ClusterRoleapiVersion: rbac.authorization.k8s.io/v1metadata:  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserverrules:- apiGroups: ["certificates.k8s.io"]  resources: ["certificatesigningrequests/selfnodeserver"]  verbs: ["create"]EOF# 将ClusterRole绑定到适当的用户组,以完成自动批准相关CSR请求,此处的system:bootstrappers组与token.csv中的组对应# token.csv,格式 Token,用户名,UID,用户组fbecd7fb7d3c75efc7f8bd8c0896addf,kubelet-bootstrap,10001,"system:kubelet-bootstrap"# 允许 system:bootstrappers 组用户创建 CSR 请求kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:kubelet-bootstrap# 自动批准 system:bootstrappers 组用户 TLS bootstrapping 首次申请证书的 CSR 请求,clusterrolebinding kubelet-bootstrap及node-client-auto-approve-csr 中的--group=system:kubelet-bootstrap 可以替换为--user=kubelet-bootstrap,与token.csv保持一致kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:kubelet-bootstrap# 自动批准 system:nodes 组用户更新 kubelet 自身与 apiserver 通讯证书的 CSR 请求kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes# 自动批准 system:nodes 组用户更新 kubelet 10250 api 端口证书的 CSR 请求kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes# 查看csr,可以发现master节点加入集群后,自动就签发证书了[root@k8s-master-1 pki]# kubectl get csrNAME AGE   SIGNERNAME REQUESTOR    CONDITIONcsr-8nqgq   29s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issuedcsr-cl2pq   26s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issuedcsr-rfc6j   56s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issuedcsr-s8fw4   26s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issuedcsr-x6qff   34s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued# 查看节点状态[root@k8s-master-1 pki]# kubectl get nodesNAME    STATUS     ROLES    AGE   VERSIONk8s-master-1   NotReady   <none>   54s   v1.20.10k8s-master-2   NotReady   <none>   33s   v1.20.10k8s-master-3   NotReady   <none>   33s   v1.20.10k8s-node-1     NotReady   <none>   40s   v1.20.10k8s-node-2     NotReady   <none>   41s   v1.20.10

Deploy kube-proxy

Create the kube-proxy certificate

# Create the certificate signing request
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
      "C": "CN",
      "ST": "hunan",
      "L": "changsha",
      "O": "system:kube-proxy",
      "OU": "system"
  }]
}
EOF
# Generate the certificate
cfssl gencert -ca=kube-apiserver-ca.pem -ca-key=kube-apiserver-ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

Annotations:

  1. CN sets the certificate's User to system:kube-proxy.
  2. The predefined RoleBinding system:node-proxier binds User system:kube-proxy to Role system:node-proxier, which grants access to the proxy-related kube-apiserver APIs.
  3. This certificate is only used by kube-proxy as a client certificate, so the hosts field is empty.
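Both claims are easy to check (a minimal sketch; output omitted):

# The built-in binding should show system:kube-proxy as a subject
kubectl get clusterrolebinding system:node-proxier -o wide
# The CN/O of the generated client certificate can be inspected with openssl
openssl x509 -in kube-proxy.pem -noout -subject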

Create the kubeconfig for kube-proxy

# Set cluster parameters
kubectl config set-cluster kubernetes --certificate-authority=kube-apiserver-ca.pem --embed-certs=true --server=https://192.168.0.15:16443 --kubeconfig=kube-proxy.kubeconfig
# Set client credentials
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
# Set the context
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
# Use the context as default
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

Create the kube-proxy configuration file

# Create the kube-proxy configuration file
cat > kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 192.168.0.0/24
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
metricsBindAddress: 0.0.0.0:10249
enableProfiling: false
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
udpIdleTimeout: 250ms
EOF

Create the kube-proxy service file

# Create the kube-proxy service unit
cat > kube-proxy.service <<"EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Create the directory
mkdir -p /var/lib/kube-proxy
# Copy the files and create the related directories
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kube-proxy.service root@$i:/usr/lib/systemd/system; done
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kube-proxy.yaml root@$i:/etc/kubernetes/; done
for i in k8s-master-{1..3} k8s-node-{1..2}; do scp kube-proxy.kubeconfig root@$i:/etc/kubernetes/; done
# Start the service
systemctl daemon-reload
systemctl enable kube-proxy.service --now

Add cluster roles (label the nodes)

# Check the current cluster state; all nodes should be NotReady at this point
[root@k8s-master-1 pki]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   NotReady   <none>   18m   v1.20.10
k8s-master-2   NotReady   <none>   18m   v1.20.10
k8s-master-3   NotReady   <none>   18m   v1.20.10
k8s-node-1     NotReady   <none>   18m   v1.20.10
k8s-node-2     NotReady   <none>   18m   v1.20.10
# Label k8s-master-{1..3} as master nodes
kubectl label nodes k8s-master-{1..3} node-role.kubernetes.io/master=
# Label k8s-node-* as worker nodes
kubectl label nodes k8s-node-{1..2} node-role.kubernetes.io/node=
# Normally, taint the masters so they only accept required system components
kubectl taint nodes k8s-master-{1..3} node-role.kubernetes.io/master=true:NoSchedule
# Or remove the taint so the masters also accept regular workloads
kubectl taint nodes k8s-master-{1..3} node-role.kubernetes.io/master-
# Check the cluster state again
[root@k8s-master-1 pki]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master-1   NotReady   master   20m   v1.20.10
k8s-master-2   NotReady   master   20m   v1.20.10
k8s-master-3   NotReady   master   20m   v1.20.10
k8s-node-1     NotReady   node     20m   v1.20.10
k8s-node-2     NotReady   node     20m   v1.20.10

Deploy calico

Official calico deployment guide (link)

# Download the manifest
curl -O https://docs.projectcalico.org/manifests/calico-etcd.yaml
# Change CALICO_IPV4POOL_CIDR to the pod CIDR
# Places to modify (and add the certificates)
- name: CALICO_IPV4POOL_CIDR
  value: "10.70.0.0/16"
- name: IP_AUTODETECTION_METHOD
  value: interface="ens.*"
- name: KUBERNETES_SERVICE_HOST
  value: "192.168.0.15"
- name: KUBERNETES_SERVICE_PORT
  value: "16443"
- name: KUBERNETES_SERVICE_PORT_HTTPS
  value: "16443"
ETCD_CA=`cat /etc/etcd/ssl/etcd-ca.pem | base64 | tr -d '\n'`
ETCD_CERT=`cat /etc/etcd/ssl/etcd.pem | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/etcd/ssl/etcd-key.pem | base64 | tr -d '\n'`
# Run calico
kubectl apply -f calico-etcd.yaml
---# Source: calico/templates/calico-etcd-secrets.yaml# The following contains k8s Secrets for use with a TLS enabled etcd cluster.# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/# 如果使用了外置的etcd,必须配置secret,并且把下边的configmap开启apiVersion: v1  kind: Secrettype: Opaquemetadata:  name: calico-etcd-secrets  namespace: kube-systemdata:  # Populate the following with etcd TLS configuration if desired, but leave blank if  # not using TLS for etcd.  # The keys below should be uncommented and the values populated with the base64  # encoded contents of each file that would be associated with the TLS data.  # Example command for encoding a file contents: cat  | base64 -w 0  etcd-key:   etcd-cert:   etcd-ca: ---# Source: calico/templates/calico-config.yaml# This ConfigMap is used to configure a self-hosted Calico installation.kind: ConfigMapapiVersion: v1metadata:  name: calico-config  namespace: kube-systemdata:  # Configure this with the location of your etcd cluster.  etcd_endpoints: "https://192.168.0.11:2379,https://192.168.0.12:2379,https://192.168.13.2379"  # If you're using TLS enabled etcd uncomment the following.  # You must also populate the Secret below with these files.  etcd_ca: "/calico-secrets/etcd-ca"  etcd_cert: "/calico-secrets/etcd-cert"  etcd_key: "/calico-secrets/etcd-key"  # Typha is disabled.  typha_service_name: "none"  # Configure the backend to use.  calico_backend: "bird"  # Configure the MTU to use for workload interfaces and tunnels.  # By default, MTU is auto-detected, and explicitly setting this field should not be required.  # You can override auto-detection by providing a non-zero value.  veth_mtu: "0"  # The CNI network configuration to install on each node. The special  # values in this config will be automatically populated.  cni_network_config: |-    {      "name": "k8s-pod-network",      "cniVersion": "0.3.1",      "plugins": [ {   "type": "calico",   "log_level": "info",   "log_file_path": "/var/log/calico/cni/cni.log",   "etcd_endpoints": "__ETCD_ENDPOINTS__",   "etcd_key_file": "__ETCD_KEY_FILE__",   "etcd_cert_file": "__ETCD_CERT_FILE__",   "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",   "mtu": __CNI_MTU__,   "ipam": {"type": "calico-ipam"   },   "policy": {"type": "k8s"   },   "kubernetes": {"kubeconfig": "__KUBECONFIG_FILEPATH__"   } }, {   "type": "portmap",   "snat": true,   "capabilities": {"portMappings": true} }, {   "type": "bandwidth",   "capabilities": {"bandwidth": true} }      ]    }---# Source: calico/templates/calico-kube-controllers-rbac.yaml# Include a clusterrole for the kube-controllers component,# and bind it to the calico-kube-controllers serviceaccount.kind: ClusterRoleapiVersion: rbac.authorization.k8s.io/v1metadata:  name: calico-kube-controllersrules:  # Pods are monitored for changing labels.  # The node controller monitors Kubernetes nodes.  # Namespace and serviceaccount labels are used for policy.  - apiGroups: [""]    resources:      - pods      - nodes      - namespaces      - serviceaccounts    verbs:      - watch      - list      - get  # Watch for changes to Kubernetes NetworkPolicies.  
- apiGroups: ["networking.k8s.io"]    resources:      - networkpolicies    verbs:      - watch      - list---kind: ClusterRoleBindingapiVersion: rbac.authorization.k8s.io/v1metadata:  name: calico-kube-controllersroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: calico-kube-controllerssubjects:- kind: ServiceAccount  name: calico-kube-controllers  namespace: kube-system------# Source: calico/templates/calico-node-rbac.yaml# Include a clusterrole for the calico-node DaemonSet,# and bind it to the calico-node serviceaccount.kind: ClusterRoleapiVersion: rbac.authorization.k8s.io/v1metadata:  name: calico-noderules:  # The CNI plugin needs to get pods, nodes, and namespaces.  - apiGroups: [""]    resources:      - pods      - nodes      - namespaces    verbs:      - get  # EndpointSlices are used for Service-based network policy rule  # enforcement.  - apiGroups: ["discovery.k8s.io"]    resources:      - endpointslices    verbs:      - watch- list  - apiGroups: [""]    resources:      - endpoints      - services    verbs:      # Used to discover service IPs for advertisement.      - watch      - list  # Pod CIDR auto-detection on kubeadm needs access to config maps.  - apiGroups: [""]    resources:      - configmaps    verbs:      - get  - apiGroups: [""]    resources:      - nodes/status    verbs:      # Needed for clearing NodeNetworkUnavailable flag.      - patch---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  name: calico-noderoleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: calico-nodesubjects:- kind: ServiceAccount  name: calico-node  namespace: kube-system---# Source: calico/templates/calico-node.yaml# This manifest installs the calico-node container, as well# as the CNI plugins and network config on# each master and worker node in a Kubernetes cluster.kind: DaemonSetapiVersion: apps/v1metadata:  name: calico-node  namespace: kube-system  labels:    k8s-app: calico-nodespec:  selector:    matchLabels:      k8s-app: calico-node  updateStrategy:    type: RollingUpdate    rollingUpdate:      maxUnavailable: 1  template:    metadata:      labels: k8s-app: calico-node    spec:      nodeSelector: kubernetes.io/os: linux      hostNetwork: true      tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule   operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly   operator: Exists - effect: NoExecute   operator: Exists      serviceAccountName: calico-node      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.      terminationGracePeriodSeconds: 0      priorityClassName: system-node-critical      initContainers: # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni   image: docker.io/calico/cni:v3.20.1   command: ["/opt/cni/bin/install"]   envFrom:   - configMapRef:# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.name: kubernetes-services-endpointoptional: true   env:     # Name of the CNI config file to create.     - name: CNI_CONF_NAMEvalue: "10-calico.conflist"     # The CNI network config to install on each node.     - name: CNI_NETWORK_CONFIGvalueFrom:  configMapKeyRef:    name: calico-config    key: cni_network_config     # The location of the etcd cluster.     
- name: ETCD_ENDPOINTSvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_endpoints     # CNI MTU Config variable     - name: CNI_MTUvalueFrom:  configMapKeyRef:    name: calico-config    key: veth_mtu     # Prevents the container from sleeping forever.     - name: SLEEPvalue: "false"   volumeMounts:     - mountPath: /host/opt/cni/binname: cni-bin-dir     - mountPath: /host/etc/cni/net.dname: cni-net-dir     - mountPath: /calico-secretsname: etcd-certs   securityContext:     privileged: true # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. - name: flexvol-driver   image: docker.io/calico/pod2daemon-flexvol:v3.20.1   volumeMounts:   - name: flexvol-driver-host     mountPath: /host/driver   securityContext:     privileged: true      containers: # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node   image: docker.io/calico/node:v3.20.1   envFrom:   - configMapRef:# Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.name: kubernetes-services-endpointoptional: true   env:     # The location of the etcd cluster.     - name: ETCD_ENDPOINTSvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_endpoints     # Location of the CA certificate for etcd.     - name: ETCD_CA_CERT_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_ca     # Location of the client key for etcd.     - name: ETCD_KEY_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_key     # Location of the client certificate for etcd.     - name: ETCD_CERT_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_cert     # Set noderef for node controller.     - name: CALICO_K8S_NODE_REFvalueFrom:  fieldRef:    fieldPath: spec.nodeName     # Choose the backend to use.     - name: CALICO_NETWORKING_BACKENDvalueFrom:  configMapKeyRef:    name: calico-config    key: calico_backend     # Cluster type to identify the deployment type     - name: CLUSTER_TYPEvalue: "k8s,bgp"     # Auto-detect the BGP IP address.     - name: IPvalue: "autodetect"     # Enable IPIP     - name: CALICO_IPV4POOL_IPIPvalue: "Always"     # Enable or Disable VXLAN on the default IP pool.     - name: CALICO_IPV4POOL_VXLANvalue: "Never"     # Set MTU for tunnel device used if ipip is enabled     - name: FELIX_IPINIPMTUvalueFrom:  configMapKeyRef:    name: calico-config    key: veth_mtu     # Set MTU for the VXLAN tunnel device.     - name: FELIX_VXLANMTUvalueFrom:  configMapKeyRef:    name: calico-config    key: veth_mtu     # Set MTU for the Wireguard tunnel device.     - name: FELIX_WIREGUARDMTUvalueFrom:  configMapKeyRef:    name: calico-config    key: veth_mtu     # The default IPv4 pool to create on startup if none exists. Pod IPs will be     # chosen from this range. Changing this value after installation will have     # no effect. This should fall within `--cluster-cidr`.     - name: CALICO_IPV4POOL_CIDRvalue: "10.70.2.0/24"     # Disable file logging so `kubectl logs` works.     - name: CALICO_DISABLE_FILE_LOGGINGvalue: "true"     # Set Felix endpoint to host default action to ACCEPT.     - name: FELIX_DEFAULTENDPOINTTOHOSTACTIONvalue: "ACCEPT"     # Disable IPv6 on Kubernetes.     
- name: FELIX_IPV6SUPPORTvalue: "false"     - name: FELIX_HEALTHENABLEDvalue: "true"   securityContext:     privileged: true   resources:     requests:cpu: 250m   lifecycle:     preStop:exec:  command:  - /bin/calico-node  - -shutdown   livenessProbe:     exec:command:- /bin/calico-node- -felix-live- -bird-live     periodSeconds: 10     initialDelaySeconds: 10     failureThreshold: 6     timeoutSeconds: 10   readinessProbe:     exec:command:- /bin/calico-node- -felix-ready- -bird-ready     periodSeconds: 10     timeoutSeconds: 10   volumeMounts:     # For maintaining CNI plugin API credentials.     - mountPath: /host/etc/cni/net.dname: cni-net-dirreadOnly: false     - mountPath: /lib/modulesname: lib-modulesreadOnly: true     - mountPath: /run/xtables.lockname: xtables-lockreadOnly: false     - mountPath: /var/run/caliconame: var-run-calicoreadOnly: false     - mountPath: /var/lib/caliconame: var-lib-calicoreadOnly: false     - mountPath: /calico-secretsname: etcd-certs     - name: policysyncmountPath: /var/run/nodeagent     # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the     # parent directory.     - name: sysfsmountPath: /sys/fs/# Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host.# If the host is known to mount that filesystem already then Bidirectional can be omitted.mountPropagation: Bidirectional     - name: cni-log-dirmountPath: /var/log/calico/cnireadOnly: true      volumes: # Used by calico-node. - name: lib-modules   hostPath:     path: /lib/modules - name: var-run-calico   hostPath:     path: /var/run/calico - name: var-lib-calico   hostPath:     path: /var/lib/calico - name: xtables-lock   hostPath:     path: /run/xtables.lock     type: FileOrCreate - name: sysfs   hostPath:     path: /sys/fs/     type: DirectoryOrCreate # Used to install CNI. - name: cni-bin-dir   hostPath:     path: /opt/cni/bin - name: cni-net-dir   hostPath:     path: /etc/cni/net.d # Used to access CNI logs. - name: cni-log-dir   hostPath:     path: /var/log/calico/cni # Mount in the etcd TLS secrets with mode 400. # See https://kubernetes.io/docs/concepts/configuration/secret/ - name: etcd-certs   secret:     secretName: calico-etcd-secrets     defaultMode: 0400 # Used to create per-pod Unix Domain Sockets - name: policysync   hostPath:     type: DirectoryOrCreate     path: /var/run/nodeagent # Used to install Flex Volume Driver - name: flexvol-driver-host   hostPath:     type: DirectoryOrCreate     path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds---apiVersion: v1kind: ServiceAccountmetadata:  name: calico-node  namespace: kube-system---# Source: calico/templates/calico-kube-controllers.yaml# See https://github.com/projectcalico/kube-controllersapiVersion: apps/v1kind: Deploymentmetadata:  name: calico-kube-controllers  namespace: kube-system  labels:    k8s-app: calico-kube-controllersspec:  # The controllers can only have a single active instance.  replicas: 1  selector:    matchLabels:      k8s-app: calico-kube-controllers  strategy:    type: Recreate  template:    metadata:      name: calico-kube-controllers      namespace: kube-system      labels: k8s-app: calico-kube-controllers    spec:      nodeSelector: kubernetes.io/os: linux      tolerations: # Mark the pod as a critical add-on for rescheduling. 
- key: CriticalAddonsOnly   operator: Exists - key: node-role.kubernetes.io/master   effect: NoSchedule      serviceAccountName: calico-kube-controllers      priorityClassName: system-cluster-critical      # The controllers must run in the host network namespace so that      # it isn't governed by policy that would prevent it from working.      hostNetwork: true      containers: - name: calico-kube-controllers   image: docker.io/calico/kube-controllers:v3.20.1   env:     # The location of the etcd cluster.     - name: ETCD_ENDPOINTSvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_endpoints     # Location of the CA certificate for etcd.     - name: ETCD_CA_CERT_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_ca     # Location of the client key for etcd.     - name: ETCD_KEY_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_key     # Location of the client certificate for etcd.     - name: ETCD_CERT_FILEvalueFrom:  configMapKeyRef:    name: calico-config    key: etcd_cert     # Choose which controllers to run.     - name: ENABLED_CONTROLLERSvalue: policy,namespace,serviceaccount,workloadendpoint,node   volumeMounts:     # Mount in the etcd TLS secrets.     - mountPath: /calico-secretsname: etcd-certs   livenessProbe:     exec:command:- /usr/bin/check-status- -l     periodSeconds: 10     initialDelaySeconds: 10     failureThreshold: 6     timeoutSeconds: 10   readinessProbe:     exec:command:- /usr/bin/check-status- -r     periodSeconds: 10      volumes: # Mount in the etcd TLS secrets with mode 400. # See https://kubernetes.io/docs/concepts/configuration/secret/ - name: etcd-certs   secret:     secretName: calico-etcd-secrets     defaultMode: 0440---apiVersion: v1kind: ServiceAccountmetadata:  name: calico-kube-controllers  namespace: kube-system---# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evictapiVersion: policy/v1beta1kind: PodDisruptionBudgetmetadata:  name: calico-kube-controllers  namespace: kube-system  labels:    k8s-app: calico-kube-controllersspec:  maxUnavailable: 1  selector:    matchLabels:      k8s-app: calico-kube-controllers---# Source: calico/templates/calico-typha.yaml---# Source: calico/templates/configure-canal.yaml---# Source: calico/templates/kdd-crds.yaml
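
Before applying the manifest, the empty etcd-key/etcd-cert/etcd-ca fields in the Secret have to be filled with base64-encoded certificates. A minimal sketch, assuming the manifest above was saved as calico.yaml and that the etcd client certificate files are /etc/etcd/ssl/etcd.pem, /etc/etcd/ssl/etcd-key.pem and /etc/etcd/ssl/ca-etcd.pem (substitute the paths used when the etcd certificates were generated earlier):

# Base64-encode the etcd client certificates (file paths are assumptions -- use your own)
ETCD_KEY=$(cat /etc/etcd/ssl/etcd-key.pem | base64 -w 0)
ETCD_CERT=$(cat /etc/etcd/ssl/etcd.pem | base64 -w 0)
ETCD_CA=$(cat /etc/etcd/ssl/ca-etcd.pem | base64 -w 0)

# Fill in the empty fields of the calico-etcd-secrets Secret
sed -i "s#etcd-key: *#etcd-key: ${ETCD_KEY}#" calico.yaml
sed -i "s#etcd-cert: *#etcd-cert: ${ETCD_CERT}#" calico.yaml
sed -i "s#etcd-ca: *#etcd-ca: ${ETCD_CA}#" calico.yaml

# Apply the manifest and wait for the DaemonSet to come up
kubectl apply -f calico.yaml
kubectl get pods -n kube-system -l k8s-app=calico-node -w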

Deploy CoreDNS

# 将service的第10IP修改一下cat > coredns.yaml <<EOFapiVersion: v1kind: ServiceAccountmetadata:  name: coredns  namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  labels:    kubernetes.io/bootstrapping: rbac-defaults  name: system:corednsrules:  - apiGroups:    - ""    resources:    - endpoints    - services    - pods    - namespaces    verbs:    - list    - watch  - apiGroups:    - discovery.k8s.io    resources:    - endpointslices    verbs:    - list    - watch---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  annotations:    rbac.authorization.kubernetes.io/autoupdate: "true"  labels:    kubernetes.io/bootstrapping: rbac-defaults  name: system:corednsroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: system:corednssubjects:- kind: ServiceAccount  name: coredns  namespace: kube-system---apiVersion: v1kind: ConfigMapmetadata:  name: coredns  namespace: kube-systemdata:  Corefile: |    .:53 { errors health {   lameduck 5s } ready kubernetes cluster.local in-addr.arpa ip6.arpa {   fallthrough in-addr.arpa ip6.arpa } prometheus :9153 forward . /etc/resolv.conf {   max_concurrent 1000 } cache 30 loop reload loadbalance    }---apiVersion: apps/v1kind: Deploymentmetadata:  name: coredns  namespace: kube-system  labels:    k8s-app: kube-dns    kubernetes.io/name: "CoreDNS"spec:  # replicas: not specified here:  # 1. Default is 1.  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.  strategy:    type: RollingUpdate    rollingUpdate:      maxUnavailable: 1  selector:    matchLabels:      k8s-app: kube-dns  template:    metadata:      labels: k8s-app: kube-dns    spec:      priorityClassName: system-cluster-critical      serviceAccountName: coredns      tolerations: - key: "CriticalAddonsOnly"   operator: "Exists"      nodeSelector: kubernetes.io/os: linux      affinity:  podAntiAffinity:    preferredDuringSchedulingIgnoredDuringExecution:    - weight: 100      podAffinityTerm: labelSelector:   matchExpressions:     - key: k8s-appoperator: Invalues: ["kube-dns"] topologyKey: kubernetes.io/hostname      containers:      - name: coredns image: coredns/coredns:1.7.0 imagePullPolicy: IfNotPresent resources:   limits:     memory: 170Mi   requests:     cpu: 100m     memory: 70Mi args: [ "-conf", "/etc/coredns/Corefile" ] volumeMounts: - name: config-volume   mountPath: /etc/coredns   readOnly: true ports: - containerPort: 53   name: dns   protocol: UDP - containerPort: 53   name: dns-tcp   protocol: TCP - containerPort: 9153   name: metrics   protocol: TCP securityContext:   allowPrivilegeEscalation: false   capabilities:     add:     - NET_BIND_SERVICE     drop:     - all   readOnlyRootFilesystem: true livenessProbe:   httpGet:     path: /health     port: 8080     scheme: HTTP   initialDelaySeconds: 60   timeoutSeconds: 5   successThreshold: 1   failureThreshold: 5 readinessProbe:   httpGet:     path: /ready     port: 8181     scheme: HTTP      dnsPolicy: Default      volumes: - name: config-volume   configMap:     name: coredns     items:     - key: Corefilepath: Corefile---apiVersion: v1kind: Servicemetadata:  name: kube-dns  namespace: kube-system  annotations:    prometheus.io/port: "9153"    prometheus.io/scrape: "true"  labels:    k8s-app: kube-dns    kubernetes.io/cluster-service: "true"    kubernetes.io/name: "CoreDNS"spec:  selector:    k8s-app: kube-dns  clusterIP: 10.0.0.10  ports:  - name: dns    port: 53    protocol: UDP  - name: dns-tcp    port: 53    protocol: TCP  - name: metrics   
    port: 9153
    protocol: TCP
EOF

# Check the Pods
[root@k8s-master-1 yaml]# kubectl get pods -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-544c9b996f-kp4dt   1/1     Running   0          4m38s
kube-system   calico-node-926km                          1/1     Running   0          4m39s
kube-system   calico-node-9w4bz                          1/1     Running   0          4m39s
kube-system   calico-node-b4cr2                          1/1     Running   0          4m39s
kube-system   calico-node-dx6gm                          1/1     Running   0          4m39s
kube-system   calico-node-mdmgr                          1/1     Running   0          4m39s
kube-system   coredns-7bf4bd64bd-lmgks                   1/1     Running   0          63s
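
The manifest is applied with kubectl apply -f coredns.yaml; once the coredns Pod is Running, the cluster DNS can also be queried directly from a node. A small sketch (dig comes from bind-utils, which is not in the package list above, so install it if needed):

kubectl apply -f coredns.yaml

# Query the cluster DNS directly from any node (yum install -y bind-utils provides dig)
dig @10.0.0.10 kubernetes.default.svc.cluster.local +short
# Expected answer: 10.0.0.1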

Deploy metrics-server

# 创建metrics-server配置文件,要根据本机实际地方修改一下/etc/kubernetes/ssl这个,metric需要使用聚合证书cat > metrics-server.yaml <<EOFapiVersion: v1kind: ServiceAccountmetadata:  labels:    k8s-app: metrics-server  name: metrics-server  namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  labels:    k8s-app: metrics-server    rbac.authorization.k8s.io/aggregate-to-admin: "true"    rbac.authorization.k8s.io/aggregate-to-edit: "true"    rbac.authorization.k8s.io/aggregate-to-view: "true"  name: system:aggregated-metrics-readerrules:- apiGroups:  - metrics.k8s.io  resources:  - pods  - nodes  verbs:  - get  - list  - watch---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  labels:    k8s-app: metrics-server  name: system:metrics-serverrules:- apiGroups:  - ""  resources:  - pods  - nodes  - nodes/stats  - namespaces  - configmaps  verbs:  - get  - list  - watch---apiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata:  labels:    k8s-app: metrics-server  name: metrics-server-auth-reader  namespace: kube-systemroleRef:  apiGroup: rbac.authorization.k8s.io  kind: Role  name: extension-apiserver-authentication-readersubjects:- kind: ServiceAccount  name: metrics-server  namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  labels:    k8s-app: metrics-server  name: metrics-server:system:auth-delegatorroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: system:auth-delegatorsubjects:- kind: ServiceAccount  name: metrics-server  namespace: kube-system---apiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  labels:    k8s-app: metrics-server  name: system:metrics-serverroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: system:metrics-serversubjects:- kind: ServiceAccount  name: metrics-server  namespace: kube-system---apiVersion: v1kind: Servicemetadata:  labels:    k8s-app: metrics-server  name: metrics-server  namespace: kube-systemspec:  ports:  - name: https    port: 443    protocol: TCP    targetPort: https  selector:    k8s-app: metrics-server---apiVersion: apps/v1kind: Deploymentmetadata:  labels:    k8s-app: metrics-server  name: metrics-server  namespace: kube-systemspec:  selector:    matchLabels:      k8s-app: metrics-server  strategy:    rollingUpdate:      maxUnavailable: 0  template:    metadata:      labels: k8s-app: metrics-server    spec:      nodeName: k8s-master-1      tolerations:      - key: "node-role.kubernetes.io/master" operator: "Exists"      priorityClassName: system-cluster-critical      serviceAccountName: metrics-server      containers:      - args: - --cert-dir=/tmp - --secure-port=4443 - --metric-resolution=30s - --kubelet-insecure-tls - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --requestheader-client-ca-file=/etc/kubernetes/ssl/front-proxy-ca.pem - --requestheader-username-headers=X-Remote-User - --requestheader-group-headers=X-Remote-Group - --requestheader-extra-headers-prefix=X-Remote-Extra- image: registry.aliyuncs.com/google_containers/metrics-server:v0.4.1 imagePullPolicy: IfNotPresent livenessProbe:   failureThreshold: 3   httpGet:     path: /livez     port: https     scheme: HTTPS   periodSeconds: 10 name: metrics-server ports: - containerPort: 4443   name: https   protocol: TCP readinessProbe:   failureThreshold: 3   httpGet:     path: /readyz     port: https     scheme: HTTPS   periodSeconds: 10 securityContext:   readOnlyRootFilesystem: true   runAsNonRoot: true   runAsUser: 1000 
volumeMounts: - mountPath: /tmp   name: tmp-dir - name: ca-ssl   mountPath: /etc/kubernetes/ssl      volumes:      - emptyDir: {} name: tmp-dir      - name: ca-ssl hostPath:   path: /etc/kubernetes/ssl---apiVersion: apiregistration.k8s.io/v1kind: APIServicemetadata:  labels:    k8s-app: metrics-server  name: v1beta1.metrics.k8s.iospec:  group: metrics.k8s.io  groupPriorityMinimum: 100  insecureSkipTLSVerify: true  service:    name: metrics-server    namespace: kube-system  version: v1beta1  versionPriority: 100EOF# 获取node信息[root@k8s-master-1 yaml]# kubectl top nodesNAME    CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   k8s-master-1   124m  6%     1989Mi   52%k8s-master-2   108m  5%     1482Mi   38%k8s-master-3   109m  5%     1437Mi   37%k8s-node-1     60m   3%     907Mi    23%k8s-node-2     56m   2%     879Mi    23% # 查看pod运行[root@k8s-master-1 yaml]# kubectl get pods -ANAMESPACE     NAME    READY   STATUS    RESTARTS   AGEkube-system   calico-kube-controllers-544c9b996f-kp4dt   1/1     Running   0   12mkube-system   calico-node-926km     1/1     Running   0   12mkube-system   calico-node-9w4bz     1/1     Running   0   12mkube-system   calico-node-b4cr2     1/1     Running   0   12mkube-system   calico-node-dx6gm     1/1     Running   0   12mkube-system   calico-node-mdmgr     1/1     Running   0   12mkube-system   coredns-7bf4bd64bd-lmgks     1/1     Running   0   9m16skube-system   metrics-server-68bdbcc6b-w44qg      1/1     Running   0   4m46s
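
Beyond kubectl top, it is worth checking that the aggregated API itself registered correctly; if AVAILABLE is not True, the front-proxy certificate paths passed to the apiserver and to metrics-server are the usual culprit:

# The APIService created by the manifest above should report Available=True
kubectl get apiservices v1beta1.metrics.k8s.io

# Per-Pod metrics should appear after the first scrape interval (30s here)
kubectl top pods -A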

Test the cluster network

Notes:

  1. Use busybox 1.28 for these tests; the newest versions have a bug that breaks nslookup output.

  2. Pods must be able to resolve Services.

  3. Pods must be able to resolve Services in other namespaces.

  4. Every node must be able to reach the kubernetes Service at 10.0.0.1:443 and the kube-dns Service at 10.0.0.10:53.

  5. Pods must be able to communicate with each other:

    • within the same namespace
    • across namespaces
    • across nodes

Create test Pods

cat << EOF | kubectl apply -f -apiVersion: v1kind: Podmetadata:  name: busybox-1  namespace: defaultspec:  nodeSelector:    node-role.kubernetes.io/master: ""  tolerations:  - key: node-role.kubernetes.io/master    operator: Exists  containers:  - name: busybox    image: busybox:1.28    imagePullPolicy: IfNotPresent    command:    - sleep    - "86400"  restartPolicy: OnFailure---apiVersion: v1kind: Podmetadata:  name: busybox-2  namespace: defaultspec:  nodeSelector:    node-role.kubernetes.io/node: ""  containers:  - name: busybox    image: busybox:1.28    imagePullPolicy: IfNotPresent    command:    - sleep    - "86400"  restartPolicy: OnFailureEOF# 查看当前存在svc[root@k8s-master-1 yaml]# kubectl get svc -ANAMESPACE     NAME      TYPE CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGEdefaultkubernetesClusterIP   10.0.0.1      <none> 443/TCP    152mkube-system   kube-dns  ClusterIP   10.0.0.10     <none> 53/UDP,53/TCP,9153/TCP   10mkube-system   metrics-server   ClusterIP   10.0.26.245   <none> 443/TCP    5m42s# 查看当前pod运行情况[root@k8s-master-1 yaml]#  kubectl get pods -A -o wideNAMESPACE     NAME    READY   STATUS    RESTARTS   AGE     IP      NODE    NOMINATED NODE   READINESS GATESdefaultbusybox-1      1/1     Running   0   41s     10.70.182.65   k8s-master-2   <none>    <none>defaultbusybox-2      1/1     Running   0   41s     10.70.109.65   k8s-node-1     <none>    <none>kube-system   calico-kube-controllers-544c9b996f-kp4dt   1/1     Running   0   14m     192.168.0.21   k8s-node-1     <none>    <none>kube-system   calico-node-926km     1/1     Running   0   14m     192.168.0.22   k8s-node-2     <none>    <none>kube-system   calico-node-9w4bz     1/1     Running   0   14m     192.168.0.11   k8s-master-1   <none>    <none>kube-system   calico-node-b4cr2     1/1     Running   0   14m     192.168.0.13   k8s-master-3   <none>    <none>kube-system   calico-node-dx6gm     1/1     Running   0   14m     192.168.0.12   k8s-master-2   <none>    <none>kube-system   calico-node-mdmgr     1/1     Running   0   14m     192.168.0.21   k8s-node-1     <none>    <none>kube-system   coredns-7bf4bd64bd-lmgks     1/1     Running   0   10m     10.70.140.65   k8s-node-2     <none>    <none>kube-system   metrics-server-68bdbcc6b-w44qg      1/1     Running   0   6m17s   10.70.196.1    k8s-master-1   <none>    <none>

Test Service resolution from Pods

# Resolve a Service in the same namespace
[root@k8s-master-1 yaml]# kubectl exec busybox-1 -- nslookup kubernetes
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local

# Resolve a Service in another namespace
[root@k8s-master-1 yaml]# kubectl exec busybox-1 -- nslookup kube-dns.kube-system
Server:    10.0.0.10
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local

Test access to the kubernetes Service from each node

# Run this test on every node
[root@k8s-master-1 ~]# telnet 10.0.0.1 443
Trying 10.0.0.1...
Connected to 10.0.0.1.
Escape character is '^]'.
^C
Connection closed by foreign host.
[root@k8s-master-1 ~]# telnet 10.0.0.10 53
Trying 10.0.0.10...
Connected to 10.0.0.10.
Escape character is '^]'.
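
To avoid logging in to every machine by hand, the same reachability check can be scripted over the passwordless SSH set up earlier; a sketch using bash's built-in /dev/tcp (TCP only, which matches the telnet test above):

for host in k8s-master-1 k8s-master-2 k8s-master-3 k8s-node-1 k8s-node-2; do
  echo "== ${host} =="
  ssh ${host} "timeout 2 bash -c '</dev/tcp/10.0.0.1/443' && echo 'kubernetes svc 10.0.0.1:443 reachable'"
  ssh ${host} "timeout 2 bash -c '</dev/tcp/10.0.0.10/53' && echo 'kube-dns svc 10.0.0.10:53 reachable'"
done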

Test Pod-to-Pod communication

[root@k8s-master-1 yaml]# kubectl exec busybox-1 -- ping 10.70.109.65
PING 10.70.109.65 (10.70.109.65): 56 data bytes
64 bytes from 10.70.109.65: seq=0 ttl=62 time=0.525 ms
64 bytes from 10.70.109.65: seq=1 ttl=62 time=0.258 ms
^C
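
The reverse direction and DNS from the worker-scheduled Pod round out the checklist above; the Pod IPs come from the kubectl get pods -o wide output earlier:

# busybox-2 (on k8s-node-1) -> busybox-1 (on k8s-master-2): Pod-to-Pod across nodes
kubectl exec busybox-2 -- ping -c 2 10.70.182.65

# DNS should also work from the Pod running on a worker node
kubectl exec busybox-2 -- nslookup kubernetes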

Deploy ingress-nginx

Official website link

Deployment YAML on GitHub

Enable the required admission controllers

Because the ingress-nginx Pods rely on an admission webhook, the kube-apiserver must have the webhook admission plugins enabled:

# Add MutatingAdmissionWebhook and ValidatingAdmissionWebhook to the kube-apiserver startup arguments
--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
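
A sketch of rolling the flag out, assuming the apiserver runs as a systemd service with its flags in /usr/lib/systemd/system/kube-apiserver.service (adjust the path if your flags live in a separate config file); repeat on all three master nodes, one at a time:

# Append MutatingAdmissionWebhook,ValidatingAdmissionWebhook to --enable-admission-plugins
vim /usr/lib/systemd/system/kube-apiserver.service

# Reload systemd and restart the apiserver, then confirm it came back up
systemctl daemon-reload
systemctl restart kube-apiserver
systemctl status kube-apiserver -l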

The k8s.gcr.io images can be pulled through a community mirror project: https://github.com/anjia0532/gcr.io_mirror, with the corresponding Docker Hub account at https://hub.docker.com/u/anjia0532.

# After downloading the latest deployment manifest, replace the k8s.gcr.io images with the mirrored ones:
image: k8s.gcr.io/ingress-nginx/controller:v1.1.2@sha256:28b11ce69e57843de44e3db6413e98d09de0f6688e33d4bd384002a44f78405c
  -> image: anjia0532/google-containers.ingress-nginx.controller:v1.1.2
image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660
  -> image: anjia0532/google-containers.ingress-nginx.kube-webhook-certgen:v1.1.1
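
The substitution can also be scripted; a sketch, assuming the downloaded manifest was saved as deploy.yaml (the sha256 digests pin the original images, so they are dropped together with the registry prefix):

# Swap the k8s.gcr.io images (and their digests) for the Docker Hub mirrors
sed -i 's#k8s.gcr.io/ingress-nginx/controller:v1.1.2@sha256:.*#anjia0532/google-containers.ingress-nginx.controller:v1.1.2#' deploy.yaml
sed -i 's#k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:.*#anjia0532/google-containers.ingress-nginx.kube-webhook-certgen:v1.1.1#' deploy.yaml

# Verify that only the mirrored images remain
grep 'image:' deploy.yaml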

Deploy

# Changes to make in the Deployment
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirstWithHostNet  # use both the host DNS and the cluster DNS
      hostNetwork: true                   # share the host network namespace
      #nodeName: k8s-master-1             # pin the Pod to k8s-master-1 only
      #tolerations:                       # tolerate the master taint
      #- key: node-role.kubernetes.io/master
      #  operator: Exists
      containers:
        - name: controller
          image: willdockerhub/ingress-nginx-controller:v1.0.0
          imagePullPolicy: IfNotPresent

# Check the status
[root@k8s-master-1 ssl]# kubectl get pods -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx
NAME                                        READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-gct7s        0/1     Completed   0          74s
ingress-nginx-admission-patch-k8lhs         0/1     Completed   1          74s
ingress-nginx-controller-558d9b6957-gjrg6   1/1     Running     0          74s
# 官方yamlapiVersion: v1kind: Namespacemetadata:  name: ingress-nginx  labels:    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx---# Source: ingress-nginx/templates/controller-serviceaccount.yamlapiVersion: v1kind: ServiceAccountmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx  namespace: ingress-nginxautomountServiceAccountToken: true---# Source: ingress-nginx/templates/controller-configmap.yamlapiVersion: v1kind: ConfigMapmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx-controller  namespace: ingress-nginxdata:---# Source: ingress-nginx/templates/clusterrole.yamlapiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm  name: ingress-nginxrules:  - apiGroups:      - ''    resources:      - configmaps      - endpoints      - nodes      - pods      - secrets    verbs:      - list      - watch  - apiGroups:      - ''    resources:      - nodes    verbs:      - get  - apiGroups:      - ''    resources:      - services    verbs:      - get      - list      - watch  - apiGroups:      - networking.k8s.io    resources:      - ingresses    verbs:      - get      - list      - watch  - apiGroups:      - ''    resources:      - events    verbs:      - create      - patch  - apiGroups:      - networking.k8s.io    resources:      - ingresses/status    verbs:      - update  - apiGroups:      - networking.k8s.io    resources:      - ingressclasses    verbs:      - get      - list      - watch---# Source: ingress-nginx/templates/clusterrolebinding.yamlapiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm  name: ingress-nginxroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: ingress-nginxsubjects:  - kind: ServiceAccount    name: ingress-nginx    namespace: ingress-nginx---# Source: ingress-nginx/templates/controller-role.yamlapiVersion: rbac.authorization.k8s.io/v1kind: Rolemetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx  namespace: ingress-nginxrules:  - apiGroups:      - ''    resources:      - namespaces    verbs:      - get  - apiGroups:      - ''    resources:      - configmaps      - pods      - secrets      - endpoints    verbs:      - get      - list      - watch  - apiGroups:      - ''    resources:      - services    verbs:      - get      - list      - watch  - apiGroups:      - networking.k8s.io    resources:      - ingresses    verbs:      - get      - list      - watch  - apiGroups:      - networking.k8s.io    
resources:      - ingresses/status    verbs:      - update  - apiGroups:      - networking.k8s.io    resources:      - ingressclasses    verbs:      - get      - list      - watch  - apiGroups:      - ''    resources:      - configmaps    resourceNames:      - ingress-controller-leader    verbs:      - get      - update  - apiGroups:      - ''    resources:      - configmaps    verbs:      - create  - apiGroups:      - ''    resources:      - events    verbs:      - create      - patch---# Source: ingress-nginx/templates/controller-rolebinding.yamlapiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx  namespace: ingress-nginxroleRef:  apiGroup: rbac.authorization.k8s.io  kind: Role  name: ingress-nginxsubjects:  - kind: ServiceAccount    name: ingress-nginx    namespace: ingress-nginx---# Source: ingress-nginx/templates/controller-service-webhook.yamlapiVersion: v1kind: Servicemetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx-controller-admission  namespace: ingress-nginxspec:  type: ClusterIP  ports:    - name: https-webhook      port: 443      targetPort: webhook      appProtocol: https  selector:    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/component: controller---# Source: ingress-nginx/templates/controller-service.yamlapiVersion: v1kind: Servicemetadata:  annotations:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx-controller  namespace: ingress-nginxspec:  type: NodePort  ports:    - name: http      port: 80      protocol: TCP      targetPort: http      appProtocol: http    - name: https      port: 443      protocol: TCP      targetPort: https      appProtocol: https  selector:    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/component: controller---# Source: ingress-nginx/templates/controller-deployment.yamlapiVersion: apps/v1kind: Deploymentmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: ingress-nginx-controller  namespace: ingress-nginxspec:  selector:    matchLabels:      app.kubernetes.io/name: ingress-nginx      app.kubernetes.io/instance: ingress-nginx      app.kubernetes.io/component: controller  revisionHistoryLimit: 10  minReadySeconds: 0  template:    metadata:      labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller    spec:      dnsPolicy: ClusterFirst      containers: - name: controller   image: k8s.gcr.io/ingress-nginx/controller:v1.0.0@sha256:0851b34f69f69352bf168e6ccf30e1e20714a264ab1ecd1933e4d8c0fc3215c6   imagePullPolicy: IfNotPresent 
  lifecycle:     preStop:exec:  command:    - /wait-shutdown   args:     - /nginx-ingress-controller     - --election-id=ingress-controller-leader     - --controller-class=k8s.io/ingress-nginx     - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller     - --validating-webhook=:8443     - --validating-webhook-certificate=/usr/local/certificates/cert     - --validating-webhook-key=/usr/local/certificates/key   securityContext:     capabilities:drop:  - ALLadd:  - NET_BIND_SERVICE     runAsUser: 101     allowPrivilegeEscalation: true   env:     - name: POD_NAMEvalueFrom:  fieldRef:    fieldPath: metadata.name     - name: POD_NAMESPACEvalueFrom:  fieldRef:    fieldPath: metadata.namespace     - name: LD_PRELOADvalue: /usr/local/lib/libmimalloc.so   livenessProbe:     failureThreshold: 5     httpGet:path: /healthzport: 10254scheme: HTTP     initialDelaySeconds: 10     periodSeconds: 10     successThreshold: 1     timeoutSeconds: 1   readinessProbe:     failureThreshold: 3     httpGet:path: /healthzport: 10254scheme: HTTP     initialDelaySeconds: 10     periodSeconds: 10     successThreshold: 1     timeoutSeconds: 1   ports:     - name: httpcontainerPort: 80protocol: TCP     - name: httpscontainerPort: 443protocol: TCP     - name: webhookcontainerPort: 8443protocol: TCP   volumeMounts:     - name: webhook-certmountPath: /usr/local/certificates/readOnly: true   resources:     requests:cpu: 100mmemory: 90Mi      nodeSelector: kubernetes.io/os: linux      serviceAccountName: ingress-nginx      terminationGracePeriodSeconds: 300      volumes: - name: webhook-cert   secret:     secretName: ingress-nginx-admission---# Source: ingress-nginx/templates/controller-ingressclass.yaml# We don't support namespaced ingressClass yet# So a ClusterRole and a ClusterRoleBinding is requiredapiVersion: networking.k8s.io/v1kind: IngressClassmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: controller  name: nginx  namespace: ingress-nginxspec:  controller: k8s.io/ingress-nginx---# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml# before changing this value, check the required kubernetes version# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisitesapiVersion: admissionregistration.k8s.io/v1kind: ValidatingWebhookConfigurationmetadata:  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhook  name: ingress-nginx-admissionwebhooks:  - name: validate.nginx.ingress.kubernetes.io    matchPolicy: Equivalent    rules:      - apiGroups:   - networking.k8s.io apiVersions:   - v1 operations:   - CREATE   - UPDATE resources:   - ingresses    failurePolicy: Fail    sideEffects: None    admissionReviewVersions:      - v1    clientConfig:      service: namespace: ingress-nginx name: ingress-nginx-controller-admission path: /networking/v1/ingresses---# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yamlapiVersion: v1kind: ServiceAccountmetadata:  name: ingress-nginx-admission  namespace: ingress-nginx  annotations:    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade    helm.sh/hook-delete-policy: 
before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhook---# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yamlapiVersion: rbac.authorization.k8s.io/v1kind: ClusterRolemetadata:  name: ingress-nginx-admission  annotations:    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookrules:  - apiGroups:      - admissionregistration.k8s.io    resources:      - validatingwebhookconfigurations    verbs:      - get      - update---# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yamlapiVersion: rbac.authorization.k8s.io/v1kind: ClusterRoleBindingmetadata:  name: ingress-nginx-admission  annotations:    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookroleRef:  apiGroup: rbac.authorization.k8s.io  kind: ClusterRole  name: ingress-nginx-admissionsubjects:  - kind: ServiceAccount    name: ingress-nginx-admission    namespace: ingress-nginx---# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yamlapiVersion: rbac.authorization.k8s.io/v1kind: Rolemetadata:  name: ingress-nginx-admission  namespace: ingress-nginx  annotations:    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookrules:  - apiGroups:      - ''    resources:      - secrets    verbs:      - get      - create---# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yamlapiVersion: rbac.authorization.k8s.io/v1kind: RoleBindingmetadata:  name: ingress-nginx-admission  namespace: ingress-nginx  annotations:    helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookroleRef:  apiGroup: rbac.authorization.k8s.io  kind: Role  name: ingress-nginx-admissionsubjects:  - kind: ServiceAccount    name: ingress-nginx-admission    namespace: ingress-nginx---# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yamlapiVersion: batch/v1kind: Jobmetadata:  name: ingress-nginx-admission-create  namespace: ingress-nginx  annotations:    helm.sh/hook: pre-install,pre-upgrade    
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookspec:  template:    metadata:      name: ingress-nginx-admission-create      labels: helm.sh/chart: ingress-nginx-4.0.1 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 1.0.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook    spec:      containers: - name: create   image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068   imagePullPolicy: IfNotPresent   args:     - create     - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc     - --namespace=$(POD_NAMESPACE)     - --secret-name=ingress-nginx-admission   env:     - name: POD_NAMESPACEvalueFrom:  fieldRef:    fieldPath: metadata.namespace      restartPolicy: OnFailure      serviceAccountName: ingress-nginx-admission      nodeSelector: kubernetes.io/os: linux      securityContext: runAsNonRoot: true runAsUser: 2000---# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yamlapiVersion: batch/v1kind: Jobmetadata:  name: ingress-nginx-admission-patch  namespace: ingress-nginx  annotations:    helm.sh/hook: post-install,post-upgrade    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded  labels:    helm.sh/chart: ingress-nginx-4.0.1    app.kubernetes.io/name: ingress-nginx    app.kubernetes.io/instance: ingress-nginx    app.kubernetes.io/version: 1.0.0    app.kubernetes.io/managed-by: Helm    app.kubernetes.io/component: admission-webhookspec:  template:    metadata:      name: ingress-nginx-admission-patch      labels: helm.sh/chart: ingress-nginx-4.0.1 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 1.0.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook    spec:      containers: - name: patch   image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068   imagePullPolicy: IfNotPresent   args:     - patch     - --webhook-name=ingress-nginx-admission     - --namespace=$(POD_NAMESPACE)     - --patch-mutating=false     - --secret-name=ingress-nginx-admission     - --patch-failure-policy=Fail   env:     - name: POD_NAMESPACEvalueFrom:  fieldRef:    fieldPath: metadata.namespace      restartPolicy: OnFailure      serviceAccountName: ingress-nginx-admission      nodeSelector: kubernetes.io/os: linux      securityContext: runAsNonRoot: true runAsUser: 2000
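
Once the controller Pod is Running, a quick smoke test confirms that traffic is actually routed. Everything below (the demo-nginx name, the demo.example.com host and the node IP in the curl command) is made up for illustration; replace the IP with the node where the ingress-nginx-controller Pod landed (kubectl get pods -n ingress-nginx -o wide), since the controller uses hostNetwork and listens on that node's port 80.

cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: demo-nginx
  template:
    metadata:
      labels:
        app: demo-nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.21
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: demo-nginx
spec:
  selector:
    app: demo-nginx
  ports:
  - port: 80
    targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo-nginx
spec:
  ingressClassName: nginx
  rules:
  - host: demo.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: demo-nginx
            port:
              number: 80
EOF

# Send a request with the Host header to the node running the controller Pod
curl -H 'Host: demo.example.com' http://192.168.0.21/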
