https://github.com/cby-chen/Kubernetes Open source takes effort; please give the repo a star, thanks!
Binary high-availability deployment of Kubernetes (k8s), with IPv4+IPv6 dual-stack support.
I use IPv6 so the cluster can be reached over the public network, which is why I configure static IPv6 addresses.
If you have no IPv6 environment, or do not want IPv6, simply skip configuring IPv6 addresses on the hosts.
Leaving IPv6 unconfigured does not affect the rest of the guide; the cluster still supports IPv6, which leaves room for later expansion.
If you do not want IPv6, just leave the NICs without IPv6 addresses; do not delete or modify the IPv6-related settings below, or things will break.
Hostname | IP address | Description | Software |
---|---|---|---|
- | 192.168.1.60 | Internet-facing node, downloads all required packages | |
Master01 | 3.7.191.61 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
Master02 | 3.7.191.62 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
Master03 | 3.7.191.63 | master node | kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubelet, kube-proxy, nfs-client, haproxy, keepalived, nginx |
Node01 | 3.7.191.64 | node | kubelet, kube-proxy, nfs-client, nginx |
Node02 | 3.7.191.65 | node | kubelet, kube-proxy, nfs-client, nginx |
- | 3.7.191.66 | VIP | |
Software | Version |
---|---|
kernel | 5.4.231 |
OS | CentOS 8 (steps also apply to CentOS 7 and Ubuntu) |
kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy | v1.26.1 |
etcd | v3.5.7 |
containerd | v1.6.16 |
docker | v20.10.23 |
cfssl | v1.6.3 |
cni | v1.2.0 |
crictl | v1.26.0 |
haproxy | v1.8.27 |
keepalived | v2.1.5 |
Network segments
Physical hosts: 3.7.191.0/24 (cluster nodes); 192.168.1.0/24 (internet-facing node)
service: 10.96.0.0/12
pod: 172.16.0.0/12
All required packages are bundled here: https://github.com/cby-chen/Kubernetes/releases/download/v1.26.1/kubernetes-v1.26.1.tar
ssh root@3.7.191.245 "nmcli con mod eth0 ipv4.addresses 3.7.191.61/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns 8.8.8.8; nmcli con up eth0"
ssh root@3.7.191.247 "nmcli con mod eth0 ipv4.addresses 3.7.191.62/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns 8.8.8.8; nmcli con up eth0"
ssh root@3.7.191.244 "nmcli con mod eth0 ipv4.addresses 3.7.191.63/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns 8.8.8.8; nmcli con up eth0"
ssh root@3.7.191.241 "nmcli con mod eth0 ipv4.addresses 3.7.191.64/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns 8.8.8.8; nmcli con up eth0"
ssh root@3.7.191.246 "nmcli con mod eth0 ipv4.addresses 3.7.191.65/24; nmcli con mod eth0 ipv4.gateway 3.7.191.1; nmcli con mod eth0 ipv4.method manual; nmcli con mod eth0 ipv4.dns 8.8.8.8; nmcli con up eth0"
# If you have no IPv6, simply skip this step
ssh root@3.7.191.61 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::10; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns 2400:3200::1; nmcli con up eth0"
ssh root@3.7.191.62 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::20; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns 2400:3200::1; nmcli con up eth0"
ssh root@3.7.191.63 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::30; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns 2400:3200::1; nmcli con up eth0"
ssh root@3.7.191.64 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::40; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns 2400:3200::1; nmcli con up eth0"
ssh root@3.7.191.65 "nmcli con mod eth0 ipv6.addresses fc00:43f4:1eea:1::50; nmcli con mod eth0 ipv6.gateway fc00:43f4:1eea:1::1; nmcli con mod eth0 ipv6.method manual; nmcli con mod eth0 ipv6.dns 2400:3200::1; nmcli con up eth0"
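A quick way to confirm the addresses took effect (a sketch; assumes the connection name is eth0 as above):
# Show the configured addresses and test the gateway
nmcli -g ipv4.addresses,ipv6.addresses connection show eth0
ip -br addr show eth0
ping -c 2 3.7.191.1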
# Inspect the NIC configuration
[root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=no
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=eth0
UUID=424fd260-c480-4899-97e6-6fc9722031e8
DEVICE=eth0
ONBOOT=yes
IPADDR=3.7.191.61
PREFIX=24
GATEWAY=3.7.191.1
DNS1=8.8.8.8
IPV6ADDR=fc00:43f4:1eea:1::10/128
IPV6_DEFAULTGW=fc00:43f4:1eea:1::1
DNS2=2400:3200::1
[root@localhost ~]#
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-master02
hostnamectl set-hostname k8s-master03
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
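Run the matching command on each node. Alternatively, a sketch to set all hostnames in one pass over SSH (host/IP pairs taken from the table above):
declare -A NAMES=([3.7.191.61]=k8s-master01 [3.7.191.62]=k8s-master02 [3.7.191.63]=k8s-master03 [3.7.191.64]=k8s-node01 [3.7.191.65]=k8s-node02)
for IP in "${!NAMES[@]}"; do ssh root@$IP "hostnamectl set-hostname ${NAMES[$IP]}"; done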
# For Ubuntu
sed -i 's/cn.archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
# For CentOS 7
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://mirror.centos.org|baseurl=https://mirrors.tuna.tsinghua.edu.cn|g' \
-i.bak \
/etc/yum.repos.d/CentOS-*.repo
# For CentOS 8
sudo sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://mirror.centos.org/$contentdir|baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos|g' \
-i.bak \
/etc/yum.repos.d/CentOS-*.repo
# For a private repository
sed -e 's|^mirrorlist=|#mirrorlist=|g' -e 's|^#baseurl=http://mirror.centos.org/\$contentdir|baseurl=http://192.168.1.123/centos|g' -i.bak /etc/yum.repos.d/CentOS-*.repo
# For Ubuntu
apt update && apt upgrade -y && apt install -y wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl
# For CentOS 7
yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl
# For CentOS 8
yum update -y && yum -y install wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
On an internet-connected server, install an identical OS and use it to download the required packages.
# Install the tooling needed to build a local repo
yum -y install createrepo yum-utils wget epel*
# Download the packages together with their full dependency trees
repotrack createrepo wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp
# Delete the downloaded libseccomp (the CentOS 7 build is too old for recent container runtimes)
rm -rf libseccomp-*.rpm
# Download a newer libseccomp instead
wget http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
# Generate the repo metadata
createrepo -u -d /data/centos7/
# Copy the packages to the intranet machines
scp -r /data/centos7/ root@3.7.191.61:
scp -r /data/centos7/ root@3.7.191.62:
scp -r /data/centos7/ root@3.7.191.63:
scp -r /data/centos7/ root@3.7.191.64:
scp -r /data/centos7/ root@3.7.191.65:
# Create the repo file on the intranet machines
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo << EOF
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos7/
gpgcheck=0
enabled=1
EOF
# Install the downloaded packages
yum clean all
yum makecache
yum install /root/centos7/* --skip-broken -y
#### Note ####
# If yum is still unusable after the installation, run the following again
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo << EOF
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos7/
gpgcheck=0
enabled=1
EOF
yum clean all
yum makecache
yum install /root/centos7/* --skip-broken -y
#### Note ####
# Install chrony and libseccomp
# yum install /root/centos7/libseccomp-2.5.1*.rpm -y
# yum install /root/centos7/chrony-*.rpm -y
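After the repo is in place, a quick sanity check (a sketch) that yum can see it and the critical packages resolved:
yum repolist enabled
rpm -q chrony libseccomp ipvsadm || echo "some packages are missing"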
# Install the tooling needed to build a local repo
yum -y install createrepo yum-utils wget epel*
# Download the packages together with their full dependency trees
repotrack wget psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp
# Generate the repo metadata
createrepo -u -d /data/centos8/
# Copy the packages to the intranet machines
scp -r centos8/ root@3.7.191.61:
scp -r centos8/ root@3.7.191.62:
scp -r centos8/ root@3.7.191.63:
scp -r centos8/ root@3.7.191.64:
scp -r centos8/ root@3.7.191.65:
# Create the repo file on the intranet machines
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo << EOF
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos8/
gpgcheck=0
enabled=1
EOF
# Install the downloaded packages
yum clean all
yum makecache
yum install /root/centos8/* --skip-broken -y
#### Note ####
# If yum is still unusable after the installation, run the following again
rm -rf /etc/yum.repos.d/*
cat > /etc/yum.repos.d/123.repo << EOF
[cby]
name=CentOS-\$releasever - Media
baseurl=file:///root/centos8/
gpgcheck=0
enabled=1
EOF
yum clean all
yum makecache
yum install /root/centos8/* --skip-broken -y
#!/bin/bash
logfile=123.log
ret=""
function getDepends()
{
echo "fileName is" $1>>$logfile
# use tr to del < >
ret=`apt-cache depends $1|grep Depends |cut -d: -f2 |tr -d "<>"`
echo $ret|tee -a $logfile
}
# Packages whose dependency trees we need to fetch
libs="wget psmisc vim net-tools nfs-kernel-server telnet lvm2 git tar curl gcc keepalived haproxy bash-completion chrony sshpass ipvsadm ipset sysstat conntrack libseccomp"
# download the dependencies of libs, 3 levels deep
i=0
while [ $i -lt 3 ] ;
do
let i++
echo $i
# download libs
newlist=" "
for j in $libs
do
added="$(getDepends $j)"
newlist="$newlist $added"
apt install $added --reinstall -d -y
done
libs=$newlist
done
# Build the local repo metadata
apt install -y dpkg-dev
sudo cp -r /var/cache/apt/archives/*.deb /data/ubuntu/
dpkg-scanpackages . /dev/null | gzip > /data/ubuntu/Packages.gz
# Copy the packages to the intranet machines
scp -r ubuntu/ root@3.7.191.61:
scp -r ubuntu/ root@3.7.191.62:
scp -r ubuntu/ root@3.7.191.63:
scp -r ubuntu/ root@3.7.191.64:
scp -r ubuntu/ root@3.7.191.65:
# Configure the apt source on the intranet machines
vim /etc/apt/sources.list
cat /etc/apt/sources.list
deb [trusted=yes] file:///root/ ubuntu/
# Install the deb packages
apt install -y ./*.deb
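A quick check (a sketch) that the key packages actually installed from the local debs:
dpkg -l keepalived haproxy ipvsadm | grep '^ii'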
#!/bin/bash
# Release pages for version lookup:
#
# https://github.com/containernetworking/plugins/releases/
# https://github.com/containerd/containerd/releases/
# https://github.com/kubernetes-sigs/cri-tools/releases/
# https://github.com/Mirantis/cri-dockerd/releases/
# https://github.com/etcd-io/etcd/releases/
# https://github.com/cloudflare/cfssl/releases/
# https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
# https://download.docker.com/linux/static/stable/x86_64/
# https://github.com/opencontainers/runc/releases/
# https://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/
cni_plugins='v1.2.0'
cri_containerd_cni='1.6.16'
crictl='v1.26.0'
cri_dockerd='0.3.1'
etcd='v3.5.7'
cfssl='1.6.3'
cfssljson='1.6.3'
kubernetes_server='v1.26.1'
docker_v='20.10.23'
runc='1.1.4'
kernel='5.4.231'
if [ ! -f "kernel-lt-${kernel}-1.el7.elrepo.x86_64.rpm" ];then
wget http://mirrors.tuna.tsinghua.edu.cn/elrepo/kernel/el7/x86_64/RPMS/kernel-lt-${kernel}-1.el7.elrepo.x86_64.rpm
else
echo "文件存在"
fi
if [ ! -f "runc.amd64" ];then
wget https://mirrors.chenby.cn/https://github.com/opencontainers/runc/releases/download/v${runc}/runc.amd64
else
echo "文件存在"
fi
if [ ! -f "docker-${docker_v}.tgz" ];then
wget https://download.docker.com/linux/static/stable/x86_64/docker-${docker_v}.tgz
else
echo "文件存在"
fi
if [ ! -f "cni-plugins-linux-amd64-${cni_plugins}.tgz" ];then
wget https://mirrors.chenby.cn/https://github.com/containernetworking/plugins/releases/download/${cni_plugins}/cni-plugins-linux-amd64-${cni_plugins}.tgz
else
echo "文件存在"
fi
if [ ! -f "cri-containerd-cni-${cri_containerd_cni}-linux-amd64.tar.gz" ];then
wget https://mirrors.chenby.cn/https://github.com/containerd/containerd/releases/download/v${cri_containerd_cni}/cri-containerd-cni-${cri_containerd_cni}-linux-amd64.tar.gz
else
echo "文件存在"
fi
if [ ! -f "crictl-${crictl}-linux-amd64.tar.gz" ];then
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/cri-tools/releases/download/${crictl}/crictl-${crictl}-linux-amd64.tar.gz
else
echo "文件存在"
fi
if [ ! -f "cri-dockerd-${cri_dockerd}.amd64.tgz" ];then
wget https://mirrors.chenby.cn/https://github.com/Mirantis/cri-dockerd/releases/download/v${cri_dockerd}/cri-dockerd-${cri_dockerd}.amd64.tgz
else
echo "文件存在"
fi
if [ ! -f "kubernetes-server-linux-amd64.tar.gz" ];then
wget https://dl.k8s.io/${kubernetes_server}/kubernetes-server-linux-amd64.tar.gz
else
echo "文件存在"
fi
if [ ! -f "etcd-${etcd}-linux-amd64.tar.gz" ];then
wget https://mirrors.chenby.cn/https://github.com/etcd-io/etcd/releases/download/${etcd}/etcd-${etcd}-linux-amd64.tar.gz
else
echo "文件存在"
fi
if [ ! -f "cfssl" ];then
wget https://mirrors.chenby.cn/https://github.com/cloudflare/cfssl/releases/download/v${cfssl}/cfssl_${cfssl}_linux_amd64 -O cfssl
else
echo "文件存在"
fi
if [ ! -f "cfssljson" ];then
wget https://mirrors.chenby.cn/https://github.com/cloudflare/cfssl/releases/download/v${cfssljson}/cfssljson_${cfssljson}_linux_amd64 -O cfssljson
else
echo "文件存在"
fi
if [ ! -f "helm-canary-linux-amd64.tar.gz" ];then
wget https://get.helm.sh/helm-canary-linux-amd64.tar.gz
else
echo "文件存在"
fi
if [ ! -f "nginx-1.22.1.tar.gz" ];then
wget http://nginx.org/download/nginx-1.22.1.tar.gz
else
echo "文件存在"
fi
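Once the script finishes, a one-line sanity check that every artifact landed (file names per the variables above):
ls -lh kernel-lt-*.rpm runc.amd64 docker-*.tgz cni-plugins-*.tgz cri-containerd-cni-*.tar.gz crictl-*.tar.gz cri-dockerd-*.tgz kubernetes-server-linux-amd64.tar.gz etcd-*.tar.gz cfssl cfssljson helm-canary-linux-amd64.tar.gz nginx-*.tar.gz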
# Skip on Ubuntu; run on CentOS
systemctl disable --now firewalld
# Skip on Ubuntu; run on CentOS
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0
cat /etc/fstab
# /dev/mapper/centos-swap swap swap defaults 0 0
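Verify swap is really off (swapon should print nothing and free should show 0):
swapon --show
free -h | grep -i swap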
# Skip on Ubuntu; run on CentOS
# Option 1
# systemctl disable --now NetworkManager
# systemctl start network && systemctl enable network
# Option 2
cat > /etc/NetworkManager/conf.d/calico.conf << EOF
[keyfile]
unmanaged-devices=interface-name:cali*;interface-name:tunl*
EOF
systemctl restart NetworkManager
# Server side (here k8s-master01, 3.7.191.61)
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 3.7.191.0/24
local stratum 10
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl restart chronyd ; systemctl enable chronyd
# Client side (all other nodes)
# apt install chrony -y
yum install chrony -y
cat > /etc/chrony.conf << EOF
pool 3.7.191.61 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl restart chronyd ; systemctl enable chronyd
# Verify from a client
chronyc sources -v
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
# apt install -y sshpass
yum install -y sshpass
ssh-keygen -f /root/.ssh/id_rsa -P ''
export IP="3.7.191.61 3.7.191.62 3.7.191.63 3.7.191.64 3.7.191.65"
export SSHPASS=123123
for HOST in $IP;do
sshpass -e ssh-copy-id -o StrictHostKeyChecking=no $HOST
done
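Verify passwordless login now works to every node (BatchMode fails instead of prompting if a key is missing):
for HOST in $IP; do ssh -o BatchMode=yes $HOST hostname; done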
# Skip on Ubuntu; run on CentOS
# Configure the ELRepo source for RHEL-8 / CentOS-8
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo
# Install ELRepo for RHEL-7, SL-7 or CentOS-7
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y
sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo
sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo
# List the available kernel packages
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
# Skip on Ubuntu; run on CentOS
# Install the latest kernel
# I pick kernel-ml (mainline) here; install kernel-lt instead if you want the long-term maintenance branch
yum -y --enablerepo=elrepo-kernel install kernel-ml
# List the installed kernels
rpm -qa | grep kernel
# Show the default kernel
grubby --default-kernel
# If the default is not the new kernel, set it explicitly
grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo)
# Reboot to apply
reboot
# Combined one-liner for v8:
yum install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum --disablerepo="*" --enablerepo="elrepo-kernel" list available -y ; yum --enablerepo=elrepo-kernel install kernel-ml -y ; grubby --default-kernel ; reboot
# Combined one-liner for v7:
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y ; sed -i "s@mirrorlist@#mirrorlist@g" /etc/yum.repos.d/elrepo.repo ; sed -i "s@elrepo.org/linux@mirrors.tuna.tsinghua.edu.cn/elrepo@g" /etc/yum.repos.d/elrepo.repo ; yum --disablerepo="*" --enablerepo="elrepo-kernel" list available -y ; yum --enablerepo=elrepo-kernel install kernel-ml -y ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot
# Offline variant
yum install -y /root/123/kernel-lt-5.4.231-1.el7.elrepo.x86_64.rpm ; grubby --set-default $(ls /boot/vmlinuz-* | grep elrepo) ; grubby --default-kernel ; reboot
# Offline installation on CentOS 7
# yum install /root/centos7/ipset-*.el7.x86_64.rpm /root/centos7/lm_sensors-libs-*.el7.x86_64.rpm /root/centos7/ipset-libs-*.el7.x86_64.rpm /root/centos7/sysstat-*.el7_9.x86_64.rpm /root/centos7/ipvsadm-*.el7.x86_64.rpm -y
# For Ubuntu
# apt install ipvsadm ipset sysstat conntrack -y
# For CentOS
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 180224 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 176128 1 ip_vs
nf_defrag_ipv6 24576 2 nf_conntrack,ip_vs
nf_defrag_ipv4 16384 1 nf_conntrack
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF
sysctl --system
cat > /etc/hosts <<EOF
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
3.7.191.61 k8s-master01
3.7.191.62 k8s-master02
3.7.191.63 k8s-master03
3.7.191.64 k8s-node01
3.7.191.65 k8s-node02
3.7.191.66 lb-vip
EOF
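The same file is needed on every node; a sketch to push it out from k8s-master01:
for NODE in 3.7.191.62 3.7.191.63 3.7.191.64 3.7.191.65; do scp /etc/hosts root@$NODE:/etc/hosts; done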
Note: choose either 2.1 or 2.2 below; you only need one.
# wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
cd kubernetes-v1.26.1/cby/
# Create the directories the cni plugins need
mkdir -p /etc/cni/net.d /opt/cni/bin
# Unpack the cni binaries
tar xf cni-plugins-linux-amd64-v*.tgz -C /opt/cni/bin/
# wget https://github.com/containerd/containerd/releases/download/v1.6.8/cri-containerd-cni-1.6.8-linux-amd64.tar.gz
# Unpack
tar -xzf cri-containerd-cni-*-linux-amd64.tar.gz -C /
# Create the systemd service file
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF
systemctl restart systemd-modules-load.service
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# Apply the kernel parameters
sysctl --system
# Generate the default configuration file
mkdir -p /etc/containerd
containerd config default | tee /etc/containerd/config.toml
# Adjust the containerd configuration
sed -i "s#SystemdCgroup\ \=\ false#SystemdCgroup\ \=\ true#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep SystemdCgroup
sed -i "s#registry.k8s.io#registry.cn-hangzhou.aliyuncs.com/chenby#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep sandbox_image
sed -i "s#config_path\ \=\ \"\"#config_path\ \=\ \"/etc/containerd/certs.d\"#g" /etc/containerd/config.toml
cat /etc/containerd/config.toml | grep certs.d
mkdir /etc/containerd/certs.d/docker.io -pv
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://hub-mirror.c.163.com"]
capabilities = ["pull", "resolve"]
EOF
systemctl daemon-reload
systemctl enable --now containerd
systemctl restart containerd
# wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.24.2/crictl-v1.24.2-linux-amd64.tar.gz
# Unpack
tar xf crictl-v*-linux-amd64.tar.gz -C /usr/bin/
# Generate the configuration file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
# Test
systemctl restart containerd
crictl info
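Optionally confirm that pulls go through the configured mirror (a sketch; needs network reachability to the mirror):
crictl pull docker.io/library/busybox:latest
crictl images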
Note: Kubernetes v1.26 no longer supports docker directly (dockershim was removed); cri-dockerd, installed below, bridges it.
# Binary download page: https://download.docker.com/linux/static/stable/x86_64/
# wget https://download.docker.com/linux/static/stable/x86_64/docker-20.10.21.tgz
# Unpack
tar xf docker-*.tgz
# Copy the binaries
cp docker/* /usr/bin/
# Create the containerd service file and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
# Prepare the docker service file
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service cri-docker.service docker.socket containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
# Prepare the docker socket file
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Create the docker group
groupadd docker
# Start docker
systemctl enable --now docker.socket && systemctl enable --now docker.service
# Verify
docker info
cat >/etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com"
],
"max-concurrent-downloads": 10,
"log-driver": "json-file",
"log-level": "warn",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"data-root": "/var/lib/docker"
}
EOF
systemctl restart docker
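Confirm the daemon picked up the new settings (the cgroup driver should report systemd and the mirrors should be listed):
docker info | grep -i 'cgroup driver'
docker info | grep -A 3 'Registry Mirrors'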
# Kubernetes 1.24 and later removed dockershim, so install cri-dockerd
# Download cri-dockerd
# wget https://mirrors.chenby.cn/https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5.amd64.tgz
# Unpack cri-dockerd
tar xvf cri-dockerd-*.amd64.tgz
cp -r cri-dockerd/ /usr/bin/
chmod +x /usr/bin/cri-dockerd/cri-dockerd
# Write the service unit
cat > /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Write the socket unit
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Start cri-docker
systemctl daemon-reload ; systemctl enable cri-docker --now
# Download the packages
# wget https://dl.k8s.io/v1.25.4/kubernetes-server-linux-amd64.tar.gz
# wget https://github.com/etcd-io/etcd/releases/download/v3.5.6/etcd-v3.5.6-linux-amd64.tar.gz
# Unpack the k8s binaries
cd cby
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
# Unpack the etcd binaries
tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/
# List /usr/local/bin
ls /usr/local/bin/
containerd crictl etcdctl kube-proxy
containerd-shim critest kube-apiserver kube-scheduler
containerd-shim-runc-v1 ctd-decoder kube-controller-manager
containerd-shim-runc-v2 ctr kubectl
containerd-stress etcd kubelet
[root@k8s-master01 ~]# kubelet --version
Kubernetes v1.26.1
[root@k8s-master01 ~]# etcdctl version
etcdctl version: 3.5.7
API version: 3.5
[root@k8s-master01 ~]#
Master='k8s-master02 k8s-master03'
Work='k8s-node01 k8s-node02'
for NODE in $Master; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
for NODE in $Work; do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/ ; done
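A quick cross-node check (a sketch) that the binaries copied over and run:
for NODE in $Master $Work; do echo $NODE; ssh $NODE "kubelet --version"; done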
mkdir -p /opt/cni/bin
mkdir pki
cd pki
cat > admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
cat > etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "876000h"
}
}
EOF
cat > kubelet-csr.json << EOF
{
"CN": "system:node:\$NODE",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "Beijing",
"ST": "Beijing",
"O": "system:nodes",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > etcd-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
cat > front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-proxy",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "Kubernetes-manual"
}
]
}
EOF
cd ..
mkdir bootstrap
cd bootstrap
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-c8ad9c
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet'."
token-id: c8ad9c
token-secret: 2e4d610cf3e7426e
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
EOF
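The token-id (6 characters) and token-secret (16 characters) above must match [a-z0-9]; if you want your own values instead of c8ad9c/2e4d610cf3e7426e, a sketch to generate a compliant pair:
TOKEN_ID=$(openssl rand -hex 3)       # 6 hex characters
TOKEN_SECRET=$(openssl rand -hex 8)   # 16 hex characters
echo "token-id: $TOKEN_ID  token-secret: $TOKEN_SECRET"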
cd ..
mkdir coredns
cd coredns
cat > coredns.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
# replicas: not specified here:
# 1. Default is 1.
# 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
nodeSelector:
kubernetes.io/os: linux
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: 10.96.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
EOF
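Once the cluster is up and this manifest is applied, DNS can be verified from any node against the service IP above (a sketch; assumes dig from bind-utils is installed):
dig @10.96.0.10 kubernetes.default.svc.cluster.local +short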
cd ..
mkdir metrics-server
cd metrics-server
cat > metrics-server.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
image: registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
- name: ca-ssl
mountPath: /etc/kubernetes/pki
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
EOF
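Once the Deployment is Running, resource metrics become available through the aggregated API:
kubectl top node
kubectl top pod -A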
# Download the certificate tooling on the master01 node
# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64" -O /usr/local/bin/cfssl
# wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssljson_1.6.3_linux_amd64" -O /usr/local/bin/cfssljson
# Both binaries are included in the bundled package
cp cfssl /usr/local/bin/cfssl
cp cfssljson /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
Unless otherwise noted, run the following on k8s-master01; the resulting certificates are distributed to the other masters below.
mkdir /etc/etcd/ssl -p
cd pki
# Generate the etcd CA and the etcd certificate/key (if you might scale out later, reserve a few extra IPs in the -hostname list)
# If you have no IPv6, the IPv6 entries may be removed or kept
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cfssl gencert \
-ca=/etc/etcd/ssl/etcd-ca.pem \
-ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
-config=ca-config.json \
-hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,3.7.191.61,3.7.191.62,3.7.191.63,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30 \
-profile=kubernetes \
etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
Master='k8s-master02 k8s-master03'
for NODE in $Master; do ssh $NODE "mkdir -p /etc/etcd/ssl"; for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}; done; done
Unless otherwise noted, run the following on k8s-master01; the resulting certificates are distributed to the other masters below.
mkdir -p /etc/kubernetes/pki
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
# Generate the apiserver certificate; extra IPs are listed as reserves for adding nodes later
# 10.96.0.1 is the first address of the service CIDR and must be computed from it; 3.7.191.66 is the HA VIP
# If you have no IPv6, the IPv6 entries may be removed or kept
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-hostname=10.96.0.1,3.7.191.66,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,x.oiox.cn,k.oiox.cn,l.oiox.cn,o.oiox.cn,3.7.191.61,3.7.191.62,3.7.191.63,3.7.191.64,3.7.191.65,3.7.191.66,3.7.191.67,3.7.191.68,3.7.191.69,192.168.1.70,fc00:43f4:1eea:1::10,fc00:43f4:1eea:1::20,fc00:43f4:1eea:1::30,fc00:43f4:1eea:1::40,fc00:43f4:1eea:1::50,fc00:43f4:1eea:1::60,fc00:43f4:1eea:1::70,fc00:43f4:1eea:1::80,fc00:43f4:1eea:1::90,fc00:43f4:1eea:1::100 \
-profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
# This prints a warning, which can be ignored
cfssl gencert \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
Pick your HA scheme in "5. High availability" first:
with haproxy + keepalived, use --server=https://3.7.191.66:8443
with the nginx scheme, use --server=https://127.0.0.1:8443
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
# Set a cluster entry
# Pick your HA scheme in "5. High availability":
# with haproxy + keepalived, use `--server=https://3.7.191.66:8443`
# with the nginx scheme, use `--server=https://127.0.0.1:8443`
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set a context entry
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set a user entry
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
# Set the default context
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
# Pick your HA scheme in "5. High availability":
# with haproxy + keepalived, use `--server=https://3.7.191.66:8443`
# with the nginx scheme, use `--server=https://127.0.0.1:8443`
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
# Pick your HA scheme in "5. High availability":
# with haproxy + keepalived, use `--server=https://3.7.191.66:8443`
# with the nginx scheme, use `--server=https://127.0.0.1:8443`
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
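A quick sanity check of the generated kubeconfig (certificate data is redacted by default):
kubectl config view --kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config current-context --kubeconfig=/etc/kubernetes/admin.kubeconfig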
Pick your HA scheme in "5. High availability" first:
with haproxy + keepalived, use --server=https://3.7.191.66:8443
with the nginx scheme, use --server=https://127.0.0.1:8443
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
# Pick your HA scheme in "5. High availability":
# with haproxy + keepalived, use `--server=https://3.7.191.66:8443`
# with the nginx scheme, use `--server=https://127.0.0.1:8443`
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
# Create the pki directory on the other nodes first
# mkdir /etc/kubernetes/pki/ -p
for NODE in k8s-master02 k8s-master03; do for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}; done; for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}; done; done
ls /etc/kubernetes/pki/
admin.csr controller-manager.csr kube-proxy.csr
admin-key.pem controller-manager-key.pem kube-proxy-key.pem
admin.pem controller-manager.pem kube-proxy.pem
apiserver.csr front-proxy-ca.csr sa.key
apiserver-key.pem front-proxy-ca-key.pem sa.pub
apiserver.pem front-proxy-ca.pem scheduler.csr
ca.csr front-proxy-client.csr scheduler-key.pem
ca-key.pem front-proxy-client-key.pem scheduler.pem
ca.pem front-proxy-client.pem
# There should be 26 files in total
ls /etc/kubernetes/pki/ |wc -l
26
# To use IPv6, replace the IPv4 addresses below with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://3.7.191.61:2380'
listen-client-urls: 'https://3.7.191.61:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://3.7.191.61:2380'
advertise-client-urls: 'https://3.7.191.61:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://3.7.191.61:2380,k8s-master02=https://3.7.191.62:2380,k8s-master03=https://3.7.191.63:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
# To use IPv6, replace the IPv4 addresses below with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://3.7.191.62:2380'
listen-client-urls: 'https://3.7.191.62:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://3.7.191.62:2380'
advertise-client-urls: 'https://3.7.191.62:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://3.7.191.61:2380,k8s-master02=https://3.7.191.62:2380,k8s-master03=https://3.7.191.63:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
# To use IPv6, replace the IPv4 addresses below with IPv6 addresses
cat > /etc/etcd/etcd.config.yml << EOF
name: 'k8s-master03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://3.7.191.63:2380'
listen-client-urls: 'https://3.7.191.63:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://3.7.191.63:2380'
advertise-client-urls: 'https://3.7.191.63:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://3.7.191.61:2380,k8s-master02=https://3.7.191.62:2380,k8s-master03=https://3.7.191.63:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
mkdir /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
systemctl daemon-reload
systemctl enable --now etcd
# To use IPv6, replace the IPv4 addresses with IPv6 addresses
export ETCDCTL_API=3
etcdctl --endpoints="3.7.191.63:2379,3.7.191.62:2379,3.7.191.61:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
+-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 3.7.191.63:2379 | d58e37898979ae63 | 3.5.7 | 20 kB | false | false | 2 | 8 | 8 | |
| 3.7.191.62:2379 | ec6b15415e24cb42 | 3.5.7 | 20 kB | false | false | 2 | 8 | 8 | |
| 3.7.191.61:2379 | 5e5cf1ca5cb2d291 | 3.5.7 | 20 kB | true | false | 2 | 8 | 8 | |
+-----------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
[root@k8s-master01 pki]#
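Besides the status table, endpoint health gives a pass/fail per member:
etcdctl --endpoints="3.7.191.61:2379,3.7.191.62:2379,3.7.191.63:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health --write-out=table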
Note: choose either 5.1.1 or 5.1.2; you only need one.
Pick which HA scheme to use; it must match the --server value used back in "3.2 Generating the k8s certificates":
with the nginx scheme, --server=https://127.0.0.1:8443
with haproxy + keepalived, --server=https://3.7.191.66:8443
Run on all nodes
# Install the build environment
yum install gcc -y
# Download and unpack the nginx source
wget http://nginx.org/download/nginx-1.22.1.tar.gz
tar xvf nginx-*.tar.gz
cd nginx-*
# Compile
./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install
# Distribute the compiled nginx
node='k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done
# Or use my prebuilt nginx
cd kubernetes-v1.26.1/cby
# Distribute the prebuilt nginx
node='k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02'
for NODE in $node; do scp nginx.tar $NODE:/usr/local/; done
# Run on the other nodes
cd /usr/local/
tar xvf nginx.tar
Run on all hosts
# Write the nginx configuration file
cat > /usr/local/nginx/conf/kube-nginx.conf <<EOF
worker_processes 1;
events {
worker_connections 1024;
}
stream {
upstream backend {
hash \$remote_addr consistent;
server 3.7.191.61:6443 max_fails=3 fail_timeout=30s;
server 3.7.191.62:6443 max_fails=3 fail_timeout=30s;
server 3.7.191.63:6443 max_fails=3 fail_timeout=30s;
}
server {
listen 127.0.0.1:8443;
proxy_connect_timeout 1s;
proxy_pass backend;
}
}
EOF
# Write the systemd unit
cat > /etc/systemd/system/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Enable at boot and start
systemctl enable --now kube-nginx
systemctl restart kube-nginx
systemctl status kube-nginx
systemctl disable --now firewalld
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
yum -y install keepalived haproxy
# cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
maxconn 2000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
timeout http-request 15s
timeout http-keep-alive 15s
frontend monitor-in
bind *:33305
mode http
option httplog
monitor-uri /monitor
frontend k8s-master
bind 0.0.0.0:8443
bind 127.0.0.1:8443
mode tcp
option tcplog
tcp-request inspect-delay 5s
default_backend k8s-master
backend k8s-master
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server k8s-master01 3.7.191.61:6443 check
server k8s-master02 3.7.191.62:6443 check
server k8s-master03 3.7.191.63:6443 check
EOF
#cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
# NOTE: adjust the interface name to match your system
interface eth0
mcast_src_ip 3.7.191.61
virtual_router_id 51
priority 100
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
3.7.191.66
}
track_script {
chk_apiserver
} }
EOF
# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
# NOTE: adjust the interface name to match your system
interface eth0
mcast_src_ip 3.7.191.62
virtual_router_id 51
priority 80
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
3.7.191.66
}
track_script {
chk_apiserver
} }
EOF
# cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
# NOTE: adjust the interface name to match your system
interface eth0
mcast_src_ip 3.7.191.63
virtual_router_id 51
priority 50
nopreempt
advert_int 2
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
3.7.191.66
}
track_script {
chk_apiserver
} }
EOF
cat > /etc/keepalived/check_apiserver.sh << EOF
#!/bin/bash
err=0
for k in \$(seq 1 3)
do
check_code=\$(pgrep haproxy)
if [[ \$check_code == "" ]]; then
err=\$(expr \$err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ \$err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
# The VIP should answer ping
[root@k8s-node02 ~]# ping 3.7.191.66
# The apiserver port on the VIP should accept telnet
[root@k8s-node02 ~]# telnet 3.7.191.66 8443
# Shut down the primary node and check that the VIP fails over to a backup
Create the following directories on all k8s nodes
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=3.7.191.61 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://3.7.191.61:2379,https://3.7.191.62:2379,https://3.7.191.63:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
# --feature-gates=IPv6DualStack=true
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
# Master02
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=3.7.191.62 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://3.7.191.61:2379,https://3.7.191.62:2379,https://3.7.191.63:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
# --feature-gates=IPv6DualStack=true
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
# Master03
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=3.7.191.63 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://3.7.191.61:2379,https://3.7.191.62:2379,https://3.7.191.63:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
# --feature-gates=IPv6DualStack=true
# --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload && systemctl enable --now kube-apiserver
# Check that the service started correctly
# systemctl status kube-apiserver
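A quick sanity check (a sketch; Kubernetes' default RBAC bindings should allow anonymous access to the health endpoints): probe /healthz on each master and through the VIP.
# Should print "ok"
curl -k https://127.0.0.1:6443/healthz
curl -k https://3.7.191.66:8443/healthz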
# Configure on all master nodes; the configuration is identical on each
# 172.16.0.0/12 is the pod CIDR; adjust it to your own network as needed
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--v=2 \\
--bind-address=127.0.0.1 \\
--root-ca-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--pod-eviction-timeout=2m0s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
--node-cidr-mask-size-ipv4=24 \\
--node-cidr-mask-size-ipv6=120 \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
# --feature-gates=IPv6DualStack=true
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kube-controller-manager
# systemctl status kube-controller-manager
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--v=2 \\
--bind-address=127.0.0.1 \\
--leader-elect=true \\
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kube-scheduler
# systemctl status kube-scheduler
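Both components serve a health endpoint on localhost (port 10257 for kube-controller-manager, 10259 for kube-scheduler by default); each should return "ok":
curl -k https://127.0.0.1:10257/healthz
curl -k https://127.0.0.1:10259/healthz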
# In "5. High availability configuration" you chose an HA scheme
# With haproxy + keepalived, use `--server=https://3.7.191.66:8443`
# With the nginx scheme, use `--server=https://127.0.0.1:8443`
cd bootstrap
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
# The token is defined in bootstrap.secret.yaml; if you want a different one, change it in that file
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true","reason":""}
etcd-2 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
# Be sure to run this step, do not forget it!!!
kubectl create -f bootstrap.secret.yaml
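To confirm the bootstrap token was actually created (its token-id must be the "c8ad9c" part of the token used in the kubeconfig above):
kubectl get secret -n kube-system | grep bootstrap-token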
cd /etc/kubernetes/
for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE mkdir -p /etc/kubernetes/pki; for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig; do scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}; done; done
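One way to check that every node received identical files is to compare the CA checksum (a sketch):
md5sum /etc/kubernetes/pki/ca.pem
for NODE in k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh $NODE "md5sum /etc/kubernetes/pki/ca.pem"; done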
Note: sections 8.2.1 and 8.2.2 must match sections 2.1 and 2.2 above (the container runtime you installed).
v1.26.0 does not support the docker runtime directly for now.
# 8.2.1 kubelet service when using the cri-dockerd runtime
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
[Service]
ExecStart=/usr/local/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-conf.yml \\
--container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
--node-labels=node.kubernetes.io/node=
[Install]
WantedBy=multi-user.target
EOF
mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
# 8.2.2 Configure the kubelet service on all k8s nodes (containerd runtime)
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-conf.yml \\
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
--node-labels=node.kubernetes.io/node=
# --feature-gates=IPv6DualStack=true
# --container-runtime=remote
# --runtime-request-timeout=15m
# --cgroup-driver=systemd
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
systemctl daemon-reload
systemctl restart kubelet
systemctl enable --now kubelet
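Once kubelet is up, it requests a client certificate through the bootstrap token; the CSRs should appear as Approved,Issued, and each node should have received its kubeconfig:
kubectl get csr
ls -l /etc/kubernetes/kubelet.kubeconfig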
[root@k8s-master01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 18s v1.26.0
k8s-master02 Ready <none> 16s v1.26.0
k8s-master03 Ready <none> 16s v1.26.0
k8s-node01 Ready <none> 14s v1.26.0
k8s-node02 Ready <none> 14s v1.26.0
[root@k8s-master01 ~]#
for NODE in k8s-master02 k8s-master03; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
for NODE in k8s-node01 k8s-node02; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.yaml \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12,fc00:2222::/112
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
systemctl daemon-reload
systemctl restart kube-proxy
systemctl enable --now kube-proxy
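To confirm kube-proxy really runs in ipvs mode, query its metrics endpoint and list the IPVS virtual servers (ipvsadm is assumed installed; add it via yum if missing):
curl 127.0.0.1:10249/proxyMode
ipvsadm -Ln | head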
Note: choose either 9.1 or 9.2, not both. It is recommended to take a snapshot here before proceeding, so you can roll back if anything goes wrong later.
**On CentOS 7, libseccomp must be upgraded first, otherwise the network plugin cannot be installed**
# https://github.com/opencontainers/runc/releases
# Upgrade runc
wget https://mirrors.chenby.cn/https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
install -m 755 runc.amd64 /usr/local/sbin/runc
cp -p /usr/local/sbin/runc /usr/local/bin/runc
cp -p /usr/local/sbin/runc /usr/bin/runc
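Verify the upgraded binary is the one found on PATH:
runc --version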
# Download a libseccomp package newer than 2.4
yum -y install http://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm
# Check the installed version
[root@k8s-master-1 ~]# rpm -qa | grep libseccomp
libseccomp-2.5.1-1.el8.x86_64
# If you do not have public IPv6 locally, use calico.yaml
kubectl apply -f calico.yaml
# If you do have public IPv6, use calico-ipv6.yaml
# kubectl apply -f calico-ipv6.yaml
# If the docker images cannot be pulled, you can use my registry instead
# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico.yaml
# sed -i "s#docker.io/calico/#registry.cn-hangzhou.aliyuncs.com/chenby/#g" calico-ipv6.yaml
# calico initialization is slow; be patient, it takes roughly ten minutes
[root@k8s-master01 ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-6747f75cdc-fbvvc 1/1 Running 0 61s
kube-system calico-node-fs7hl 1/1 Running 0 61s
kube-system calico-node-jqz58 1/1 Running 0 61s
kube-system calico-node-khjlg 1/1 Running 0 61s
kube-system calico-node-wmf8q 1/1 Running 0 61s
kube-system calico-node-xc6gn 1/1 Running 0 61s
kube-system calico-typha-6cdc4b4fbc-57snb 1/1 Running 0 61s
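You can also wait for the daemonset rollout explicitly and confirm all nodes remain Ready:
kubectl -n kube-system rollout status daemonset/calico-node
kubectl get node -owide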
# [root@k8s-master01 ~]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
# [root@k8s-master01 ~]# chmod 700 get_helm.sh
# [root@k8s-master01 ~]# ./get_helm.sh
wget https://get.helm.sh/helm-canary-linux-amd64.tar.gz
tar xvf helm-canary-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/
# Add the helm repo
helm repo add cilium https://helm.cilium.io
# Switch the images to a domestic registry
helm pull cilium/cilium
tar xvf cilium-*.tgz
cd cilium/
sed -i "s#quay.io/cilium#registry.cn-hangzhou.aliyuncs.com/chenby#g" values.yaml
sed -i "s#quay.io/coreos#registry.cn-hangzhou.aliyuncs.com/chenby#g" values.yaml
# Install with default parameters
helm install cilium ./cilium/ -n kube-system
# Enable IPv6
# helm install cilium cilium/cilium --namespace kube-system --set ipv6.enabled=true
# Enable route information and the monitoring plugins (Hubble)
# helm install cilium cilium/cilium --namespace kube-system --set hubble.relay.enabled=true --set hubble.ui.enabled=true --set prometheus.enabled=true --set operator.prometheus.enabled=true --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop,tcp,flow,port-distribution,icmp,http}"
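Whichever variant you install, confirm the release deployed and the agent daemonset rolled out (a sketch):
helm list -n kube-system
kubectl -n kube-system rollout status daemonset/cilium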
[root@k8s-master01 ~]# kubectl get pod -A | grep cil
kube-system cilium-gmr6c 1/1 Running 0 5m3s
kube-system cilium-kzgdj 1/1 Running 0 5m3s
kube-system cilium-operator-69b677f97c-6pw4k 1/1 Running 0 5m3s
kube-system cilium-operator-69b677f97c-xzzdk 1/1 Running 0 5m3s
kube-system cilium-q2rnr 1/1 Running 0 5m3s
kube-system cilium-smx5v 1/1 Running 0 5m3s
kube-system cilium-tdjq4 1/1 Running 0 5m3s
[root@k8s-master01 ~]#
[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/1.12.1/examples/kubernetes/addons/prometheus/monitoring-example.yaml
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl apply -f monitoring-example.yaml
namespace/cilium-monitoring created
serviceaccount/prometheus-k8s created
configmap/grafana-config created
configmap/grafana-cilium-dashboard created
configmap/grafana-cilium-operator-dashboard created
configmap/grafana-hubble-dashboard created
configmap/prometheus created
clusterrole.rbac.authorization.k8s.io/prometheus created
clusterrolebinding.rbac.authorization.k8s.io/prometheus created
service/grafana created
service/prometheus created
deployment.apps/grafana created
deployment.apps/prometheus created
[root@k8s-master01 yaml]#
Note: the connectivity test cases below can only complete after CoreDNS has been installed.
[root@k8s-master01 yaml]# wget https://raw.githubusercontent.com/cilium/cilium/master/examples/kubernetes/connectivity-check/connectivity-check.yaml
[root@k8s-master01 yaml]# sed -i "s#google.com#baidu.cn#g" connectivity-check.yaml
[root@k8s-master01 yaml]# kubectl apply -f connectivity-check.yaml
deployment.apps/echo-a created
deployment.apps/echo-b created
deployment.apps/echo-b-host created
deployment.apps/pod-to-a created
deployment.apps/pod-to-external-1111 created
deployment.apps/pod-to-a-denied-cnp created
deployment.apps/pod-to-a-allowed-cnp created
deployment.apps/pod-to-external-fqdn-allow-google-cnp created
deployment.apps/pod-to-b-multi-node-clusterip created
deployment.apps/pod-to-b-multi-node-headless created
deployment.apps/host-to-b-multi-node-clusterip created
deployment.apps/host-to-b-multi-node-headless created
deployment.apps/pod-to-b-multi-node-nodeport created
deployment.apps/pod-to-b-intra-node-nodeport created
service/echo-a created
service/echo-b created
service/echo-b-headless created
service/echo-b-host-headless created
ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created
ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created
ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
cilium-monitoring grafana-59957b9549-6zzqh 1/1 Running 0 10m
cilium-monitoring prometheus-7c8c9684bb-4v9cl 1/1 Running 0 10m
default chenby-75b5d7fbfb-7zjsr 1/1 Running 0 27h
default chenby-75b5d7fbfb-hbvr8 1/1 Running 0 27h
default chenby-75b5d7fbfb-ppbzg 1/1 Running 0 27h
default echo-a-6799dff547-pnx6w 1/1 Running 0 10m
default echo-b-fc47b659c-4bdg9 1/1 Running 0 10m
default echo-b-host-67fcfd59b7-28r9s 1/1 Running 0 10m
default host-to-b-multi-node-clusterip-69c57975d6-z4j2z 1/1 Running 0 10m
default host-to-b-multi-node-headless-865899f7bb-frrmc 1/1 Running 0 10m
default pod-to-a-allowed-cnp-5f9d7d4b9d-hcd8x 1/1 Running 0 10m
default pod-to-a-denied-cnp-65cc5ff97b-2rzb8 1/1 Running 0 10m
default pod-to-a-dfc64f564-p7xcn 1/1 Running 0 10m
default pod-to-b-intra-node-nodeport-677868746b-trk2l 1/1 Running 0 10m
default pod-to-b-multi-node-clusterip-76bbbc677b-knfq2 1/1 Running 0 10m
default pod-to-b-multi-node-headless-698c6579fd-mmvd7 1/1 Running 0 10m
default pod-to-b-multi-node-nodeport-5dc4b8cfd6-8dxmz 1/1 Running 0 10m
default pod-to-external-1111-8459965778-pjt9b 1/1 Running 0 10m
default pod-to-external-fqdn-allow-google-cnp-64df9fb89b-l9l4q 1/1 Running 0 10m
kube-system cilium-7rfj6 1/1 Running 0 56s
kube-system cilium-d4cch 1/1 Running 0 56s
kube-system cilium-h5x8r 1/1 Running 0 56s
kube-system cilium-operator-5dbddb6dbf-flpl5 1/1 Running 0 56s
kube-system cilium-operator-5dbddb6dbf-gcznc 1/1 Running 0 56s
kube-system cilium-t2xlz 1/1 Running 0 56s
kube-system cilium-z65z7 1/1 Running 0 56s
kube-system coredns-665475b9f8-jkqn8 1/1 Running 1 (36h ago) 36h
kube-system hubble-relay-59d8575-9pl9z 1/1 Running 0 56s
kube-system hubble-ui-64d4995d57-nsv9j 2/2 Running 0 56s
kube-system metrics-server-776f58c94b-c6zgs 1/1 Running 1 (36h ago) 37h
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl edit svc -n kube-system hubble-ui
service/hubble-ui edited
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring grafana
service/grafana edited
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl edit svc -n cilium-monitoring prometheus
service/prometheus edited
[root@k8s-master01 yaml]#
# In each of the three services above, change the type field to:
type: NodePort
[root@k8s-master01 yaml]# kubectl get svc -A | grep monit
cilium-monitoring grafana NodePort 10.100.250.17 <none> 3000:30707/TCP 15m
cilium-monitoring prometheus NodePort 10.100.131.243 <none> 9090:31155/TCP 15m
[root@k8s-master01 yaml]#
[root@k8s-master01 yaml]# kubectl get svc -A | grep hubble
kube-system hubble-metrics ClusterIP None <none> 9965/TCP 5m12s
kube-system hubble-peer ClusterIP 10.100.150.29 <none> 443/TCP 5m12s
kube-system hubble-relay ClusterIP 10.109.251.34 <none> 80/TCP 5m12s
kube-system hubble-ui NodePort 10.102.253.59 <none> 80:31219/TCP 5m12s
[root@k8s-master01 yaml]#
# Grafana
http://3.7.191.61:30707
# Prometheus
http://3.7.191.61:31155
# Hubble UI
http://3.7.191.61:31219
cd coredns/
cat coredns.yaml | grep clusterIP:
clusterIP: 10.96.0.10
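This clusterIP must match the clusterDNS address (10.96.0.10) configured in kubelet-conf.yml above, otherwise pods will query the wrong DNS address.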
kubectl create -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
In newer versions of Kubernetes, system resource metrics are collected by metrics-server, which reports memory, disk, CPU, and network usage for nodes and Pods.
# Install metrics-server
cd metrics-server/
kubectl apply -f metrics-server.yaml
kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master01 154m 1% 1715Mi 21%
k8s-master02 151m 1% 1274Mi 16%
k8s-master03 523m 6% 1345Mi 17%
k8s-node01 84m 1% 671Mi 8%
k8s-node02 73m 0% 727Mi 9%
k8s-node03 96m 1% 769Mi 9%
k8s-node04 68m 0% 673Mi 8%
k8s-node05 82m 1% 679Mi 8%
cat<<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: docker.io/library/busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
# Check the pod
kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 17s
kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 17h
kubectl exec busybox -n default -- nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
kubectl exec busybox -n default -- nslookup kube-dns.kube-system
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
telnet 10.96.0.1 443
Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
telnet 10.96.0.10 53
Trying 10.96.0.10...
Connected to 10.96.0.10.
Escape character is '^]'.
curl 10.96.0.10:53
curl: (52) Empty reply from server
kubectl get po -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
busybox 1/1 Running 0 17m 172.27.14.193 k8s-node02 <none> <none>
kubectl get po -n kube-system -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-5dffd5886b-4blh6 1/1 Running 0 77m 172.25.244.193 k8s-master01 <none> <none>
calico-node-fvbdq 1/1 Running 1 (75m ago) 77m 3.7.191.61 k8s-master01 <none> <none>
calico-node-g8nqd 1/1 Running 0 77m 3.7.191.64 k8s-node01 <none> <none>
calico-node-mdps8 1/1 Running 0 77m 3.7.191.65 k8s-node02 <none> <none>
calico-node-nf4nt 1/1 Running 0 77m 3.7.191.63 k8s-master03 <none> <none>
calico-node-sq2ml 1/1 Running 0 77m 3.7.191.62 k8s-master02 <none> <none>
calico-typha-8445487f56-mg6p8 1/1 Running 0 77m 3.7.191.65 k8s-node02 <none> <none>
calico-typha-8445487f56-pxbpj 1/1 Running 0 77m 3.7.191.61 k8s-master01 <none> <none>
calico-typha-8445487f56-tnssl 1/1 Running 0 77m 3.7.191.64 k8s-node01 <none> <none>
coredns-5db5696c7-67h79 1/1 Running 0 63m 172.25.92.65 k8s-master02 <none> <none>
metrics-server-6bf7dcd649-5fhrw 1/1 Running 0 61m 172.18.195.1 k8s-master03 <none> <none>
# Exec into busybox and ping other nodes and pods on other hosts
kubectl exec -ti busybox -- sh
/ # ping 3.7.191.64
PING 3.7.191.64 (3.7.191.64): 56 data bytes
64 bytes from 3.7.191.64: seq=0 ttl=63 time=0.358 ms
64 bytes from 3.7.191.64: seq=1 ttl=63 time=0.668 ms
64 bytes from 3.7.191.64: seq=2 ttl=63 time=0.637 ms
64 bytes from 3.7.191.64: seq=3 ttl=63 time=0.624 ms
64 bytes from 3.7.191.64: seq=4 ttl=63 time=0.907 ms
# Connectivity here shows the pod can communicate across namespaces and across hosts
cat > deployments.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: docker.io/library/nginx:1.14.2
        ports:
        - containerPort: 80
EOF
kubectl apply -f deployments.yaml
deployment.apps/nginx-deployment created
kubectl get pod
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 6m25s
nginx-deployment-9456bbbf9-4bmvk 1/1 Running 0 8s
nginx-deployment-9456bbbf9-9rcdk 1/1 Running 0 8s
nginx-deployment-9456bbbf9-dqv8s 1/1 Running 0 8s
# Delete the nginx deployment
[root@k8s-master01 ~]# kubectl delete -f deployments.yaml
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard --namespace kube-system
kubectl edit svc kubernetes-dashboard -n kube-system
# Change the service type to:
type: NodePort
kubectl get svc kubernetes-dashboard -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.108.120.110 <none> 443:30034/TCP 34s
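With the NodePort above, the dashboard is reachable on any node IP, for example (accept the self-signed certificate, then sign in with the token created below):
https://3.7.191.61:30034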
cat > dashboard-user.yaml << EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
kubectl apply -f dashboard-user.yaml
kubectl -n kube-system create token admin-user
eyJhbGciOiJSUzI1NiIsImtpZCI6IkFZWENLUmZQWTViWUF4UV81NWJNb0JEa0I4R2hQMHVac2J3RDM3RHJLcFEifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNjcwNjc0MzY1LCJpYXQiOjE2NzA2NzA3NjUsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiODkyODRjNGUtYzk0My00ODkzLWE2ZjctNTYxZWJhMzE2NjkwIn19LCJuYmYiOjE2NzA2NzA3NjUsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDphZG1pbi11c2VyIn0.DFxzS802Iu0lldikjhyp2diZSpVAUoSTbOjerH2t7ToM0TMoPQdcdDyvBTcNlIew3F01u4D6atNV7J36IGAnHEX0Q_cYAb00jINjy1YXGz0gRhRE0hMrXay2-Qqo6tAORTLUVWrctW6r0li5q90rkBjr5q06Lt5BTpUhbhbgLQQJWwiEVseCpUEikxD6wGnB1tCamFyjs3sa-YnhhqCR8wUAZcTaeVbMxCuHVAuSqnIkxat9nyxGcsjn7sqmBqYjjOGxp5nhHPDj03TWmSJlb_Csc7pvLsB9LYm0IbER4xDwtLZwMAjYWRbjKxbkUp4L9v5CZ4PbIHap9qQp1FXreA
cd ingress/
kubectl apply -f deploy.yaml
kubectl apply -f backend.yaml
# After the above resources are created, then run:
kubectl apply -f ingress-demo-app.yaml
kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress-host-bar nginx hello.chenby.cn,demo.chenby.cn 3.7.191.62 80 7s
[root@hello ~/yaml]# kubectl get svc -A | grep ingress
ingress-nginx ingress-nginx-controller NodePort 10.104.231.36 <none> 80:32636/TCP,443:30579/TCP 104s
ingress-nginx ingress-nginx-controller-admission ClusterIP 10.101.85.88 <none> 443/TCP 105s
[root@hello ~/yaml]#
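A quick smoke test through the controller's NodePort, using one of the hosts defined in ingress-demo-app.yaml (a sketch; adjust the host and port to match your own output):
curl -H "Host: hello.chenby.cn" http://3.7.191.62:32636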
# Deploy a test application
cat<<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: chenby
spec:
  replicas: 3
  selector:
    matchLabels:
      app: chenby
  template:
    metadata:
      labels:
        app: chenby
    spec:
      containers:
      - name: chenby
        image: docker.io/library/nginx
        resources:
          limits:
            memory: "128Mi"
            cpu: "500m"
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: chenby
spec:
  ipFamilyPolicy: PreferDualStack
  ipFamilies:
  - IPv6
  - IPv4
  type: NodePort
  selector:
    app: chenby
  ports:
  - port: 80
    targetPort: 80
EOF
# Check the assigned NodePort
[root@k8s-master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
chenby NodePort fd00::a29c <none> 80:30779/TCP 5s
[root@k8s-master01 ~]#
# Access via the internal network
[root@localhost yaml]# curl -I http://[fd00::a29c]
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:35 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes
[root@localhost yaml]# curl -I http://3.7.191.61:30779
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:59 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes
[root@localhost yaml]#
# Access via the public network
[root@localhost yaml]# curl -I http://[2409:8a10:9e18:9020::10]:30779
HTTP/1.1 200 OK
Server: nginx/1.21.6
Date: Thu, 05 May 2022 10:20:54 GMT
Content-Type: text/html
Content-Length: 615
Last-Modified: Tue, 25 Jan 2022 15:03:52 GMT
Connection: keep-alive
ETag: "61f01158-267"
Accept-Ranges: bytes
# Enable kubectl bash completion
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# Pull the images with docker
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/cni:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/node:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/typha:master
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2
docker pull kubernetesui/dashboard:v2.7.0
docker pull kubernetesui/metrics-scraper:v1.0.8
docker pull quay.io/cilium/cilium:v1.12.6
docker pull quay.io/cilium/certgen:v0.1.8
docker pull quay.io/cilium/hubble-relay:v1.12.6
docker pull quay.io/cilium/hubble-ui-backend:v0.9.2
docker pull quay.io/cilium/hubble-ui:v0.9.2
docker pull quay.io/cilium/cilium-etcd-operator:v2.0.7
docker pull quay.io/cilium/operator:v1.12.6
docker pull quay.io/cilium/clustermesh-apiserver:v1.12.6
docker pull quay.io/coreos/etcd:v3.5.4
docker pull quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26
# Save the images with docker
docker save registry.cn-hangzhou.aliyuncs.com/chenby/cni:master -o cni.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/node:master -o node.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/typha:master -o typha.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/kube-controllers:master -o kube-controllers.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/coredns:v1.10.0 -o coredns.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/pause:3.6 -o pause.tar
docker save registry.cn-hangzhou.aliyuncs.com/chenby/metrics-server:v0.5.2 -o metrics-server.tar
docker save kubernetesui/dashboard:v2.7.0 -o dashboard.tar
docker save kubernetesui/metrics-scraper:v1.0.8 -o metrics-scraper.tar
docker save quay.io/cilium/cilium:v1.12.6 -o cilium.tar
docker save quay.io/cilium/certgen:v0.1.8 -o certgen.tar
docker save quay.io/cilium/hubble-relay:v1.12.6 -o hubble-relay.tar
docker save quay.io/cilium/hubble-ui-backend:v0.9.2 -o hubble-ui-backend.tar
docker save quay.io/cilium/hubble-ui:v0.9.2 -o hubble-ui.tar
docker save quay.io/cilium/cilium-etcd-operator:v2.0.7 -o cilium-etcd-operator.tar
docker save quay.io/cilium/operator:v1.12.6 -o operator.tar
docker save quay.io/cilium/clustermesh-apiserver:v1.12.6 -o clustermesh-apiserver.tar
docker save quay.io/coreos/etcd:v3.5.4 -o etcd.tar
docker save quay.io/cilium/startup-script:d69851597ea019af980891a4628fb36b7880ec26 -o startup-script.tar
# Copy the images to every node
for NODE in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do scp -r images/ $NODE:/root/ ; done
# Create the containerd namespace
ctr ns create k8s.io
# Import the images
ctr --namespace k8s.io image import images/cni.tar
ctr --namespace k8s.io image import images/node.tar
ctr --namespace k8s.io image import images/typha.tar
ctr --namespace k8s.io image import images/kube-controllers.tar
ctr --namespace k8s.io image import images/coredns.tar
ctr --namespace k8s.io image import images/pause.tar
ctr --namespace k8s.io image import images/metrics-server.tar
ctr --namespace k8s.io image import images/dashboard.tar
ctr --namespace k8s.io image import images/metrics-scraper.tar
ctr --namespace k8s.io image import images/cilium.tar
ctr --namespace k8s.io image import images/certgen.tar
ctr --namespace k8s.io image import images/hubble-relay.tar
ctr --namespace k8s.io image import images/hubble-ui-backend.tar
ctr --namespace k8s.io image import images/hubble-ui.tar
ctr --namespace k8s.io image import images/cilium-etcd-operator.tar
ctr --namespace k8s.io image import images/operator.tar
ctr --namespace k8s.io image import images/clustermesh-apiserver.tar
ctr --namespace k8s.io image import images/etcd.tar
ctr --namespace k8s.io image import images/startup-script.tar
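Confirm the imports landed in the k8s.io namespace:
ctr --namespace k8s.io image ls -q | head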
# Pull the chart tarball and extract it
helm pull cilium/cilium
# Check the image versions
root@hello:~/cilium# cat values.yaml| grep tag: -C1
repository: "quay.io/cilium/cilium"
tag: "v1.12.6"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/certgen"
tag: "v0.1.8@sha256:4a456552a5f192992a6edcec2febb1c54870d665173a33dc7d876129b199ddbd"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/hubble-relay"
tag: "v1.12.6"
# hubble-relay-digest
--
repository: "quay.io/cilium/hubble-ui-backend"
tag: "v0.9.2@sha256:a3ac4d5b87889c9f7cc6323e86d3126b0d382933bd64f44382a92778b0cde5d7"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/hubble-ui"
tag: "v0.9.2@sha256:d3596efc94a41c6b772b9afe6fe47c17417658956e04c3e2a28d293f2670663e"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/cilium-etcd-operator"
tag: "v2.0.7@sha256:04b8327f7f992693c2cb483b999041ed8f92efc8e14f2a5f3ab95574a65ea2dc"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/operator"
tag: "v1.12.6"
# operator-generic-digest
--
repository: "quay.io/cilium/startup-script"
tag: "d69851597ea019af980891a4628fb36b7880ec26"
pullPolicy: "IfNotPresent"
--
repository: "quay.io/cilium/cilium"
tag: "v1.12.6"
# cilium-digest
--
repository: "quay.io/cilium/clustermesh-apiserver"
tag: "v1.12.6"
# clustermesh-apiserver-digest
--
repository: "quay.io/coreos/etcd"
tag: "v3.5.4@sha256:795d8660c48c439a7c3764c2330ed9222ab5db5bb524d8d0607cac76f7ba82a3"
pullPolicy: "IfNotPresent"
About
https://www.oiox.cn/index.php/start-page.html
CSDN, GitHub, 知乎, 开源中国, 思否, 掘金, 简书, 华为云, 阿里云, 腾讯云, 哔哩哔哩, 今日头条, 新浪微博, personal blog
Search the web for 《小陈运维》
Articles are mainly published on the WeChat official account 《Linux运维交流社区》