
Binary Deployment of a Highly Available Kubernetes Cluster, v1.28.0


1. Cluster Environment Preparation

1.1 Host Plan

        

Host IP          Hostname     Spec  Role              Software
192.168.198.144  k8s-master1  2C4G  master            kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubectl
192.168.198.145  k8s-master2  2C4G  master            kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubectl
192.168.198.146  k8s-master3  2C4G  master            kube-apiserver, kube-controller-manager, kube-scheduler, etcd, kubectl
192.168.198.147  k8s-node1    2C4G  worker            kubelet, kube-proxy, docker, haproxy, keepalived
192.168.198.148  k8s-node2    2C4G  worker            kubelet, kube-proxy, docker, haproxy, keepalived
192.168.198.100  /            /     VIP (virtual IP)

1.2 Software Versions

Software    Version     Notes
CentOS 7    kernel 5.4  upgraded via ELRepo (see 2.1.9)
kubernetes  v1.28.0
etcd        v3.5.11     latest release at time of writing
calico      v3.26.4
coredns     v1.10.1
docker      24.0.7
haproxy     1.5.18      CentOS 7 YUM default
keepalived  1.3.5       CentOS 7 YUM default

1.3 Network Allocation

Network name     CIDR              Notes
Node network     192.168.198.0/24
Service network  10.96.0.0/16
Pod network      10.244.0.0/16

 

2. Cluster Deployment

2.1 Host Preparation

2.1.1 Set hostnames

hostnamectl set-hostname xxx   # on each host, replace xxx with its name from the plan in 1.1

2.1.2 Hostname-to-IP resolution

cat >> /etc/hosts << EOF
192.168.198.144 k8s-master1
192.168.198.145 k8s-master2
192.168.198.146 k8s-master3
192.168.198.147 k8s-node1
192.168.198.148 k8s-node2
EOF

2.1.3 Host security settings

2.1.3.1 Disable the firewall

systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state

2.1.3.2 Disable SELinux

setenforce 0
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
sestatus

2.1.4 Disable swap

swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
echo "vm.swappiness=0" >> /etc/sysctl.conf
sysctl -p
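Swap should now be fully off; a quick check (the Swap line should read all zeros):

free -m | grep -i swap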

2.1.5 System time synchronization

# Install the time-sync client
yum -y install ntpdate

# Create an hourly time-sync cron job
crontab -e
0 */1 * * * ntpdate time1.aliyun.com

2.1.7 Install IPVS tools and load kernel modules

yum -y install ipvsadm ipset sysstat conntrack libseccomp
#Configure ipvsadm module loading
#Add the modules that need to be loaded
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
#Make executable, run, and verify the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

 

2.1.9 Upgrade the Linux kernel

Upgrade to at least version 5.4.

yum -y install perl
#Import the ELRepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
#Install the ELRepo YUM repository
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
#Install kernel-lt (lt = long-term support; ml would be the latest mainline)
yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
#Make the new kernel (grub2 menu entry 0) the default boot entry
grub2-set-default 0
#Regenerate the grub2 configuration
grub2-mkconfig -o /boot/grub2/grub.cfg

[root@k8s-node2 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-5.4.265-1.el7.elrepo.x86_64
Found initrd image: /boot/initramfs-5.4.265-1.el7.elrepo.x86_64.img
Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-3ae18cd125514089a081168aac1529ca
Found initrd image: /boot/initramfs-0-rescue-3ae18cd125514089a081168aac1529ca.img
done
#After the kernel is configured on all nodes, reboot so the upgraded kernel takes effect
reboot
#After the reboot, verify the upgrade succeeded
[root@k8s-master1 ~]# uname -r
5.4.265-1.el7.elrepo.x86_64

 

2.1.10 Linux kernel tuning

#Add bridge-filtering and IP-forwarding settings
cat > /etc/sysctl.d/k8s.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
#Load the br_netfilter module (temporary, for the current boot only)
[root@k8s-master1 ~]# modprobe br_netfilter
#Apply the sysctl settings and verify
[root@k8s-master1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
#Configure the modules to load persistently
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
#Enable loading at boot
systemctl enable --now systemd-modules-load.service
#Check that the module is loaded
lsmod | grep br_netfilter

[root@k8s-master1 ~]# lsmod | grep br_netfilter
br_netfilter 28672 0

All of the above must be performed on every node.

2.1.11 Configure passwordless SSH

Run this on k8s-master1 only, then copy the public key to the other nodes.

[root@k8s-master1 ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:40/tHc966yq63YQ8YK84udBoZMqkCeZB5XTj8QaSOZo root@k8s-master1
The key's randomart image is:
+---[RSA 2048]----+
| +o= |
| +++ = |
| .o... o |
|.E . |
|.o . o So |
|+ * + o...+.. |
| + o + .o .=... |
| . .o.ooo+. +.|
| oo++.oo==+|
+----[SHA256]-----+
ssh-copy-id root@k8s-master2
ssh-copy-id root@k8s-master3
ssh-copy-id root@k8s-node1
ssh-copy-id root@k8s-node2

 

2.2 Load Balancer Preparation

2.2.1 Install haproxy and keepalived

Run on the HA nodes; in this deployment HAProxy and Keepalived live on k8s-node1 and k8s-node2.

yum -y install haproxy keepalived

 

2.2.2 HAProxy configuration

cat >/etc/haproxy/haproxy.cfg<<"EOF"
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

frontend k8s-master
  bind 0.0.0.0:6443
  bind 127.0.0.1:6443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master1 192.168.198.144:6443 check
  server k8s-master2 192.168.198.145:6443 check
  server k8s-master3 192.168.198.146:6443 check
EOF
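Before starting the service, it is worth validating the file; haproxy's -c flag parses the configuration without starting the proxy:

haproxy -c -f /etc/haproxy/haproxy.cfg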

 

2.2.3 Keepalived configuration

Note that the MASTER and BACKUP configurations differ (state, mcast_src_ip, priority).

ha1:

cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
  router_id LVS_DEVEL
  script_user root
  enable_script_security
}
vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 5
  weight -5
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER
  interface ens33
  mcast_src_ip 192.168.198.147
  virtual_router_id 51
  priority 100
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass K8SHA_KA_AUTH
  }
  virtual_ipaddress {
    192.168.198.100
  }
  track_script {
    chk_apiserver
  }
}
EOF
ha2:

cat >/etc/keepalived/keepalived.conf<<"EOF"
! Configuration File for keepalived
global_defs {
  router_id LVS_DEVEL
  script_user root
  enable_script_security
}
vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 5
  weight -5
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state BACKUP
  interface ens33
  mcast_src_ip 192.168.198.148
  virtual_router_id 51
  priority 99
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass K8SHA_KA_AUTH
  }
  virtual_ipaddress {
    192.168.198.100
  }
  track_script {
    chk_apiserver
  }
}
EOF

2.2.4 Health-check script

Configure on both ha1 and ha2.

cat > /etc/keepalived/check_apiserver.sh <<"EOF"
#!/bin/bash
err=0
for k in $(seq 1 3)
do
  check_code=$(pgrep haproxy)
  if [[ $check_code == "" ]]; then
    err=$(expr $err + 1)
    sleep 1
    continue
  else
    err=0
    break
  fi
done

if [[ $err != "0" ]]; then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
else
  exit 0
fi
EOF
chmod +x /etc/keepalived/check_apiserver.sh

 

2.2.5 Start the services and verify

systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
#Check the addresses; the VIP should be present
[root@k8s-node1 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 00:0c:29:09:7a:32 brd ff:ff:ff:ff:ff:ff
inet 192.168.198.147/24 brd 192.168.198.255 scope global noprefixroute ens33
valid_lft forever preferred_lft forever
inet 192.168.198.100/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::6d0d:5af:b421:6829/64 scope link noprefixroute
valid_lft forever preferred_lft forever
inet6 fe80::2dcd:beb6:b077:827d/64 scope link tentative noprefixroute dadfailed
valid_lft forever preferred_lft forever
[root@k8s-node1 ~]#

Test that the HAProxy monitor page responds correctly, for example:
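The monitor-uri frontend from 2.2.2 listens on port 33305; an HTTP 200 reply means HAProxy is alive behind the VIP:

curl -i http://192.168.198.100:33305/monitor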

2.4 Deploy the etcd Cluster

Perform these steps on k8s-master1.

2.4.1 Create a working directory

mkdir -p /data/k8s-work
cd /data/k8s-work

2.4.2 Get the cfssl tools

https://github.com/cloudflare/cfssl/releases
#Upload the downloaded cfssl binaries to the server
#(yum -y install lrzsz provides rz/sz for the upload if needed)
[root@k8s-master1 k8s-work]# ll
total 40232
-rw-r--r-- 1 root root 16659824 Mar 9 2022 cfssl_1.6.1_linux_amd64
-rw-r--r-- 1 root root 13502544 Mar 9 2022 cfssl-certinfo_1.6.1_linux_amd64
-rw-r--r-- 1 root root 11029744 Mar 9 2022 cfssljson_1.6.1_linux_amd64
# Make the binaries executable
[root@k8s-master1 k8s-work]# chmod +x cfssl*
[root@k8s-master1 k8s-work]# ll
total 40232
-rwxr-xr-x 1 root root 16659824 Mar 9 2022 cfssl_1.6.1_linux_amd64
-rwxr-xr-x 1 root root 13502544 Mar 9 2022 cfssl-certinfo_1.6.1_linux_amd64
-rwxr-xr-x 1 root root 11029744 Mar 9 2022 cfssljson_1.6.1_linux_amd64
# Rename them and move them into /usr/local/bin
[root@k8s-master1 k8s-work]# mv cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@k8s-master1 k8s-work]# mv cfssl-certinfo_1.6.1_linux_amd64 /usr/local/bin/cfssl-certinfo
[root@k8s-master1 k8s-work]# mv cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
# Installation complete; check the cfssl version
[root@k8s-master1 k8s-work]# cfssl version
Version: 1.6.1
Runtime: go1.12.12

 

2.4.3 Create the CA certificate

The CA acts as the certificate authority for the whole cluster.

2.4.3.1 Configure the CA certificate signing request

cat > ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF

2.4.3.2 Generate the CA certificate

cfssl gencert -initca ca-csr.json | cfssljson -bare ca
[root@k8s-master1 k8s-work]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2024/01/04 09:22:43 [INFO] generating a new CA key and certificate from CSR
2024/01/04 09:22:43 [INFO] generate received request
2024/01/04 09:22:43 [INFO] received CSR
2024/01/04 09:22:43 [INFO] generating key: rsa-2048
2024/01/04 09:22:43 [INFO] encoded CSR
2024/01/04 09:22:43 [INFO] signed certificate with serial number 338731219198113317417686336532940600662573621163

#Outputs ca.csr, ca-key.pem and ca.pem
[root@k8s-master1 k8s-work]# ll
total 16
-rw-r--r-- 1 root root 1045 Jan 4 09:22 ca.csr
-rw-r--r-- 1 root root 256 Jan 4 09:22 ca-csr.json
-rw------- 1 root root 1679 Jan 4 09:22 ca-key.pem
-rw-r--r-- 1 root root 1310 Jan 4 09:22 ca.pem
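To double-check the CA's subject and its 10-year (87600h) expiry, the cfssl-certinfo tool installed above can decode the certificate:

cfssl-certinfo -cert ca.pem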

 

2.4.3.3 Configure the CA signing policy

cfssl print-defaults config > ca-config.json
cat > ca-config.json <<"EOF"
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

2.4.4 Create the etcd certificate

2.4.4.1 Configure the etcd certificate request

192.168.198.150 and 192.168.198.151 are IPs reserved for future etcd members.

cat > etcd-csr.json <<"EOF"
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.198.144",
    "192.168.198.145",
    "192.168.198.146",
    "192.168.198.150",
    "192.168.198.151"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

2.4.4.2 Generate the etcd certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd

This produces etcd.csr, etcd-key.pem and etcd.pem.

[root@k8s-master1 k8s-work]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd
2024/01/04 10:18:44 [INFO] generate received request
2024/01/04 10:18:44 [INFO] received CSR
2024/01/04 10:18:44 [INFO] generating key: rsa-2048
2024/01/04 10:18:44 [INFO] encoded CSR
2024/01/04 10:18:44 [INFO] signed certificate with serial number 615580008866301102078218902811936499168508210128
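As an optional sanity check (not part of the original procedure), openssl can confirm that all the listed hosts made it into the certificate's SANs:

openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'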

 

2.4.5 Deploy the etcd cluster

2.4.5.1 Download the etcd package

https://github.com/etcd-io/etcd/releases/download/v3.5.11/etcd-v3.5.11-linux-amd64.tar.gz

 

2.4.5.2 Install etcd

#Unpack the etcd release tarball
[root@k8s-master1 k8s-work]# tar -xf etcd-v3.5.11-linux-amd64.tar.gz

[root@k8s-master1 k8s-work]# cd etcd-v3.5.11-linux-amd64
[root@k8s-master1 etcd-v3.5.11-linux-amd64]# ll
total 54896
drwxr-xr-x 3 528287 89939 40 Dec 7 18:30 Documentation
-rwxr-xr-x 1 528287 89939 23535616 Dec 7 18:30 etcd
-rwxr-xr-x 1 528287 89939 17739776 Dec 7 18:30 etcdctl
-rwxr-xr-x 1 528287 89939 14864384 Dec 7 18:30 etcdutl
-rw-r--r-- 1 528287 89939 42066 Dec 7 18:30 README-etcdctl.md
-rw-r--r-- 1 528287 89939 7359 Dec 7 18:30 README-etcdutl.md
-rw-r--r-- 1 528287 89939 9394 Dec 7 18:30 README.md
-rw-r--r-- 1 528287 89939 7896 Dec 7 18:30 READMEv2-etcdctl.md

#Copy the etcd binaries to /usr/local/bin; the service files below reference them there
[root@k8s-master1 etcd-v3.5.11-linux-amd64]# cp etcd* /usr/local/bin/
#Distribute the binaries to the other etcd nodes
[root@k8s-master1 etcd-v3.5.11-linux-amd64]# scp etcd* k8s-master2:/usr/local/bin/
etcd 100% 22MB 53.1MB/s 00:00
etcdctl 100% 17MB 48.6MB/s 00:00
etcdutl 100% 14MB 60.6MB/s 00:00
[root@k8s-master1 etcd-v3.5.11-linux-amd64]# scp etcd* k8s-master3:/usr/local/bin/
etcd 100% 22MB 64.5MB/s 00:00
etcdctl 100% 17MB 95.0MB/s 00:00
etcdutl 100% 14MB 79.0MB/s 00:00
[root@k8s-master1 etcd-v3.5.11-linux-amd64]#

 

2.4.5.4 Create the configuration files

mkdir /etc/etcd

etcd configuration on k8s-master1

cat >  /etc/etcd/etcd.conf <<EOF
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.198.144:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.198.144:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.198.144:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.198.144:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.198.144:2380,etcd2=https://192.168.198.145:2380,etcd3=https://192.168.198.146:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
Notes:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer (cluster traffic) listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer advertise address
ETCD_ADVERTISE_CLIENT_URLS: client advertise address
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: join state; "new" for a brand-new cluster, "existing" to join one that already exists

etcd configuration on k8s-master2

cat >  /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.198.145:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.198.145:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.198.145:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.198.145:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.198.144:2380,etcd2=https://192.168.198.145:2380,etcd3=https://192.168.198.146:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

etcd configuration on k8s-master3

cat >  /etc/etcd/etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.198.146:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.198.146:2379,http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.198.146:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.198.146:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.198.144:2380,etcd2=https://192.168.198.145:2380,etcd3=https://192.168.198.146:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
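The three files differ only in ETCD_NAME and the node's own IP. If you prefer not to write them by hand, a sketch like the following generates all three from k8s-master1 (it relies on the SSH trust from 2.1.11 and the hostnames/IPs in the host plan):

for i in 1 2 3; do
  ip="192.168.198.$((143 + i))"   # k8s-master1..3 are .144/.145/.146
  ssh root@k8s-master${i} "mkdir -p /etc/etcd && cat > /etc/etcd/etcd.conf <<EOF
#[Member]
ETCD_NAME=\"etcd${i}\"
ETCD_DATA_DIR=\"/var/lib/etcd/default.etcd\"
ETCD_LISTEN_PEER_URLS=\"https://${ip}:2380\"
ETCD_LISTEN_CLIENT_URLS=\"https://${ip}:2379,http://127.0.0.1:2379\"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS=\"https://${ip}:2380\"
ETCD_ADVERTISE_CLIENT_URLS=\"https://${ip}:2379\"
ETCD_INITIAL_CLUSTER=\"etcd1=https://192.168.198.144:2380,etcd2=https://192.168.198.145:2380,etcd3=https://192.168.198.146:2380\"
ETCD_INITIAL_CLUSTER_TOKEN=\"etcd-cluster\"
ETCD_INITIAL_CLUSTER_STATE=\"new\"
EOF"
done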

 

2.4.5.5 Create the service files

mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
[root@k8s-master1 etcd]# cd /data/k8s-work
[root@k8s-master1 k8s-work]# ll
total 19896
-rw-r--r-- 1 root root 356 Jan 4 10:03 ca-config.json
-rw-r--r-- 1 root root 1045 Jan 4 09:22 ca.csr
-rw-r--r-- 1 root root 256 Jan 4 09:22 ca-csr.json
-rw------- 1 root root 1679 Jan 4 09:22 ca-key.pem
-rw-r--r-- 1 root root 1310 Jan 4 09:22 ca.pem
-rw-r--r-- 1 root root 1078 Jan 4 10:18 etcd.csr
-rw-r--r-- 1 root root 331 Jan 4 10:16 etcd-csr.json
-rw------- 1 root root 1679 Jan 4 10:18 etcd-key.pem
-rw-r--r-- 1 root root 1452 Jan 4 10:18 etcd.pem
drwxr-xr-x 3 528287 89939 163 Dec 7 18:30 etcd-v3.5.11-linux-amd64
-rw-r--r-- 1 root root 20334735 Dec 7 18:36 etcd-v3.5.11-linux-amd64.tar.gz
#Copy the generated etcd and CA certificates into the ssl directory
[root@k8s-master1 k8s-work]# cp ca*.pem /etc/etcd/ssl
[root@k8s-master1 k8s-work]# cp etcd*.pem /etc/etcd/ssl
#Distribute the certificates to the other nodes
[root@k8s-master1 k8s-work]# scp ca*.pem k8s-master2:/etc/etcd/ssl
ca-key.pem 100% 1679 1.4MB/s 00:00
ca.pem 100% 1310 1.0MB/s 00:00
[root@k8s-master1 k8s-work]# scp ca*.pem k8s-master3:/etc/etcd/ssl
ca-key.pem 100% 1679 1.3MB/s 00:00
ca.pem

[root@k8s-master1 k8s-work]# scp etcd*.pem k8s-master2:/etc/etcd/ssl
etcd-key.pem 100% 1679 1.1MB/s 00:00
etcd.pem 100% 1452 1.2MB/s 00:00
[root@k8s-master1 k8s-work]# scp etcd*.pem k8s-master3:/etc/etcd/ssl
etcd-key.pem 100% 1679 1.4MB/s 00:00
etcd.pem

Create the etcd systemd unit; the contents are identical on all three nodes.

cat > /etc/systemd/system/etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
--peer-client-cert-auth \
--client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

2.4.5.7 Start the etcd cluster

systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd


[root@k8s-master3 etcd]# systemctl daemon-reload
[root@k8s-master3 etcd]# systemctl enable --now etcd.service
[root@k8s-master3 etcd]# systemctl status etcd
● etcd.service - Etcd Server
Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2024-01-04 11:21:03 CST; 1min 15s ago
Main PID: 4515 (etcd)
CGroup: /system.slice/etcd.service
└─4515 /usr/local/bin/etcd --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-cert...

Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.193129+0800","caller":"api/capability.go:75","msg":"enabled capabilit...on":"3.0"}
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.19358+0800","caller":"etcdserver/server.go:2066","msg":"published local member ...
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.1937+0800","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.194023+0800","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.194473+0800","caller":"embed/serve.go:187","msg":"serving client traf...0.1:2379"}
Jan 04 11:21:03 k8s-master3 systemd[1]: Started Etcd Server.
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.195005+0800","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.195052+0800","caller":"etcdmain/main.go:50","msg":"successfully notif...t daemon"}
Jan 04 11:21:03 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:03.196251+0800","caller":"embed/serve.go:250","msg":"serving client traf...146:2379"}
Jan 04 11:21:04 k8s-master3 etcd[4515]: {"level":"info","ts":"2024-01-04T11:21:04.065509+0800","caller":"membership/cluster.go:576","msg":"updated clus...to":"3.5"}
Hint: Some lines were ellipsized, use -l to show in full.

 

2.4.5.8 Verify cluster status

[root@k8s-master3 etcd]# etcdctl member list
3ed6f5bbee8d7853, started, etcd2, https://192.168.198.145:2380, https://192.168.198.145:2379, false
b6b5743177b982b2, started, etcd3, https://192.168.198.146:2380, https://192.168.198.146:2379, false
f79986bfdb812e09, started, etcd1, https://192.168.198.144:2380, https://192.168.198.144:2379, false
[root@k8s-master3 etcd]# etcdctl member list -w table
+------------------+---------+-------+------------------------------+------------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+------------------------------+------------------------------+------------+
| 3ed6f5bbee8d7853 | started | etcd2 | https://192.168.198.145:2380 | https://192.168.198.145:2379 | false |
| b6b5743177b982b2 | started | etcd3 | https://192.168.198.146:2380 | https://192.168.198.146:2379 | false |
| f79986bfdb812e09 | started | etcd1 | https://192.168.198.144:2380 | https://192.168.198.144:2379 | false |
+------------------+---------+-------+------------------------------+------------------------------+------------+
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.198.144:2379,https://192.168.198.145:2379,https://192.168.198.146:2379 endpoint status

#The member whose IS LEADER column is true is the leader
[root@k8s-master3 etcd]# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.198.144:2379,https://192.168.198.145:2379,https://192.168.198.146:2379 endpoint status
+------------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+------------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.198.144:2379 | f79986bfdb812e09 | 3.5.11 | 20 kB | true | false | 2 | 9 | 9 | |
| https://192.168.198.145:2379 | 3ed6f5bbee8d7853 | 3.5.11 | 20 kB | false | false | 2 | 9 | 9 | |
| https://192.168.198.146:2379 | b6b5743177b982b2 | 3.5.11 | 20 kB | false | false | 2 | 9 | 9 | |
+------------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
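A complementary health probe uses the same TLS flags; every member should report healthy:

ETCDCTL_API=3 /usr/local/bin/etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.198.144:2379,https://192.168.198.145:2379,https://192.168.198.146:2379 endpoint health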

 

2.5 Kubernetes Cluster Deployment

2.5.1 Download the Kubernetes packages

wget https://dl.k8s.io/v1.28.0/kubernetes-server-linux-amd64.tar.gz

 

2.5.2 Install the Kubernetes binaries

tar -xvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes/server/bin/

cp kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

2.5.3 Distribute the Kubernetes binaries

scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master2:/usr/local/bin/
scp kube-apiserver kube-controller-manager kube-scheduler kubectl k8s-master3:/usr/local/bin/
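A quick way to confirm the binaries landed correctly on every master (each component supports --version):

/usr/local/bin/kube-apiserver --version
ssh k8s-master2 /usr/local/bin/kube-apiserver --version
ssh k8s-master3 /usr/local/bin/kube-apiserver --version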

 

2.5.4 Create directories on the cluster nodes

#Create on every master node
mkdir -p /etc/kubernetes/
mkdir -p /etc/kubernetes/ssl
mkdir -p /var/log/kubernetes

2.5.5 Deploy kube-apiserver

2.5.5.1 Create the apiserver certificate request

cat > kube-apiserver-csr.json << "EOF"
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.198.144",
    "192.168.198.145",
    "192.168.198.146",
    "192.168.198.147",
    "192.168.198.148",
    "192.168.198.149",
    "192.168.198.150",
    "192.168.198.151",
    "192.168.198.152",
    "192.168.198.100",
    "10.96.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

2.5.5.2 Generate the apiserver certificate and token file

This produces kube-apiserver.csr, kube-apiserver-key.pem and kube-apiserver.pem.

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
#Generate token.csv
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

2.5.5.3 Create the apiserver service configuration

cat > /etc/kubernetes/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.198.144 \
--advertise-address=192.168.198.144 \
--secure-port=6443 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.96.0.0/16 \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api \
--etcd-cafile=/etc/etcd/ssl/ca.pem \
--etcd-certfile=/etc/etcd/ssl/etcd.pem \
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
--etcd-servers=https://192.168.198.144:2379,https://192.168.198.145:2379,https://192.168.198.146:2379 \
--allow-privileged=true \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-apiserver-audit.log \
--event-ttl=1h \
--v=4"

EOF

2.5.5.4 Create the apiserver systemd unit

 

cat > /etc/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

2.5.5.5 Sync files to the other master nodes

cp ca*.pem /etc/kubernetes/ssl/
cp kube-apiserver*.pem /etc/kubernetes/ssl/
cp token.csv /etc/kubernetes/

scp /etc/kubernetes/ssl/ca*.pem k8s-master2:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/ca*.pem k8s-master3:/etc/kubernetes/ssl

scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master2:/etc/kubernetes/ssl
scp /etc/kubernetes/ssl/kube-apiserver*.pem k8s-master3:/etc/kubernetes/ssl

scp /etc/kubernetes/token.csv k8s-master2:/etc/kubernetes
scp /etc/kubernetes/token.csv k8s-master3:/etc/kubernetes
#The copied config must be edited so --bind-address and --advertise-address use each host's own IP
scp /etc/kubernetes/kube-apiserver.conf k8s-master2:/etc/kubernetes/kube-apiserver.conf
scp /etc/kubernetes/kube-apiserver.conf k8s-master3:/etc/kubernetes/kube-apiserver.conf
scp /etc/systemd/system/kube-apiserver.service k8s-master2:/etc/systemd/system/kube-apiserver.service
scp /etc/systemd/system/kube-apiserver.service k8s-master3:/etc/systemd/system/kube-apiserver.service
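Since the only host-specific values in kube-apiserver.conf are the two address flags, something like this adjusts the copies (the 'address=' anchor deliberately avoids touching the --etcd-servers list, which also contains 192.168.198.144):

ssh k8s-master2 "sed -i 's/address=192.168.198.144/address=192.168.198.145/g' /etc/kubernetes/kube-apiserver.conf"
ssh k8s-master3 "sed -i 's/address=192.168.198.144/address=192.168.198.146/g' /etc/kubernetes/kube-apiserver.conf"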

2.5.5.6 Start the apiserver service

systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver


# Test: an HTTP 401 Unauthorized reply means the apiserver is up and serving TLS; no credentials have been presented yet
curl --insecure https://192.168.198.144:6443/
curl --insecure https://192.168.198.145:6443/
curl --insecure https://192.168.198.146:6443/
curl --insecure https://192.168.198.100:6443/

[root@k8s-master1 kubernetes]# curl --insecure https://192.168.198.100:6443/
{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "Unauthorized",
"reason": "Unauthorized",
"code": 401
}

 

2.5.6 Deploy kubectl

2.5.6.1 Create the kubectl certificate request

cat > admin-csr.json << "EOF"
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "system"
    }
  ]
}
EOF

2.5.6.2 Generate the certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

2.5.6.3 Copy the files into place

cp admin*.pem /etc/kubernetes/ssl/

2.5.6.4 Generate the kube.config file

kube.config is the kubectl configuration file; it contains everything needed to reach the apiserver: the apiserver address, the CA certificate, and the client certificate.

[root@k8s-master1 k8s-work]# kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.198.100:6443 --kubeconfig=kube.config
Cluster "kubernetes" set.

[root@k8s-master1 k8s-work]# cat kube.config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVTzFVN3NvbE1URUxNb2lBU2VyNlV0RlFDbjZzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQXhNVGd3TUZvWERUTTBNREV3TVRBeE1UZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RURBTwpCZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcjJxTGlPc0NpK2RZbnEwK1lha2EKUHZoZTdFMFY0c3BUd1ZkMWNSVGU3eTY1ekdXY2tKOXVCKzNKWjl3bGhkL3d5Y0NuaTN0d2RoVVh2bW1RRTdJcwphVkJhenpzRjFGcDE0MFdkbjVZcGZEd0V4OWR3QitaVE83SnZncXFBUTlnTlRyaGZ3UkpVeUVIZDcvaUY2NE5RCkhlTTJmQm1QUHpXeWwvdkc1bVB3UVBvL1krcnMxckZ6c2JVRG1EYmJ5enQxNjZXblVlVWozYzB3aFp1Y2hOb1oKSDFwbjZXTXFIdWxhM2FuaElQWGJVa0VXK1FVVzZLU1FnVm1MVzZZbVlEZGROYisxWkhxb1FKQUI5YUNRRlgrZgpnTXQzSnBIQXJPY3hWTG1leStMYzQ5RGhtQVZDZVcxQkZOeEhpTVZoZGo1czljWjQyUlgvV1diWlZpWjVGeGsyCldRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVLYzZIV3RTRlQ3R3BFTFJtVUhaeldxUHdhRG93RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUYxZQpUY21oZEdzcUI5YmwxVjFVeE0wczg4dm05VjJ1dnhIOEhEVmtIdjJ4eUxucEFwVWpHQldPZ3JRdSsyMXErdE1lCkVNa2hRUzlFOGRQTkQzSkgxbmdob3lyN0dad3k0SFVmQkxoOXdNTVNHd3plaWJJd3lXRUtQMUlBbmlIbmFxUW4KTDBUMlVzZk81c0x6MXRiRHlKWXZQbXA4aHVJTkRCdFUrSmtGY3huM21lanluSFpVeEJEZExURUtqWlNib1VzVAo2NXArMzNzalhlc2wwQVZEMlg0UUo5NytWa0I4ZXNFcnZQaUVRdmJyWG9OQTVROVNZNk5xZ1JtUGo4VjNZTEdSCldXVjB3SGphT0dCUWx1OUlqbnBIWGs2U2JmTmpCd25vS01iWEFOcWVUWDdNSXgwOWd6QmphUTJWWUlnSFh3VloKbGY3azIxMDBkR0JNajRMd3hwND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://192.168.198.100:6443
name: kubernetes
contexts: null
current-context: ""
kind: Config
preferences: {}
users: null
[root@k8s-master1 k8s-work]# kubectl config set-credentials admin --client-certificate=admin.pem --client-key=admin-key.pem --embed-certs=true --kubeconfig=kube.config
User "admin" set.

[root@k8s-master1 k8s-work]# cat kube.config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVTzFVN3NvbE1URUxNb2lBU2VyNlV0RlFDbjZzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQXhNVGd3TUZvWERUTTBNREV3TVRBeE1UZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RURBTwpCZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcjJxTGlPc0NpK2RZbnEwK1lha2EKUHZoZTdFMFY0c3BUd1ZkMWNSVGU3eTY1ekdXY2tKOXVCKzNKWjl3bGhkL3d5Y0NuaTN0d2RoVVh2bW1RRTdJcwphVkJhenpzRjFGcDE0MFdkbjVZcGZEd0V4OWR3QitaVE83SnZncXFBUTlnTlRyaGZ3UkpVeUVIZDcvaUY2NE5RCkhlTTJmQm1QUHpXeWwvdkc1bVB3UVBvL1krcnMxckZ6c2JVRG1EYmJ5enQxNjZXblVlVWozYzB3aFp1Y2hOb1oKSDFwbjZXTXFIdWxhM2FuaElQWGJVa0VXK1FVVzZLU1FnVm1MVzZZbVlEZGROYisxWkhxb1FKQUI5YUNRRlgrZgpnTXQzSnBIQXJPY3hWTG1leStMYzQ5RGhtQVZDZVcxQkZOeEhpTVZoZGo1czljWjQyUlgvV1diWlZpWjVGeGsyCldRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVLYzZIV3RTRlQ3R3BFTFJtVUhaeldxUHdhRG93RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUYxZQpUY21oZEdzcUI5YmwxVjFVeE0wczg4dm05VjJ1dnhIOEhEVmtIdjJ4eUxucEFwVWpHQldPZ3JRdSsyMXErdE1lCkVNa2hRUzlFOGRQTkQzSkgxbmdob3lyN0dad3k0SFVmQkxoOXdNTVNHd3plaWJJd3lXRUtQMUlBbmlIbmFxUW4KTDBUMlVzZk81c0x6MXRiRHlKWXZQbXA4aHVJTkRCdFUrSmtGY3huM21lanluSFpVeEJEZExURUtqWlNib1VzVAo2NXArMzNzalhlc2wwQVZEMlg0UUo5NytWa0I4ZXNFcnZQaUVRdmJyWG9OQTVROVNZNk5xZ1JtUGo4VjNZTEdSCldXVjB3SGphT0dCUWx1OUlqbnBIWGs2U2JmTmpCd25vS01iWEFOcWVUWDdNSXgwOWd6QmphUTJWWUlnSFh3VloKbGY3azIxMDBkR0JNajRMd3hwND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://192.168.198.100:6443
name: kubernetes
contexts: null
current-context: ""
kind: Config
preferences: {}
users:
- name: admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzVENDQXNXZ0F3SUJBZ0lVWUpLNkpBSXgvMlRXZFR0cGhOejd4WmxscVBFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQTNNamt3TUZvWERUTTBNREV3TVRBM01qa3dNRm93YXpFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVE4d0RRWURWUVFMRXdaemVYTjBaVzB4RGpBTUJnTlZCQU1UCkJXRmtiV2x1TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF0RkVvOTNENnlXSysKMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUaE5PbVVBS1JiUUsrN2l1aXlVdTdRY3pqektXeApMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS92dkVkY0VtUGJuT1FaRytoMVBQQzRIZHJRdjh0CkNZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLclZRc2RhQlVwNktkOHpoeFRTOGw2REZuaHVSU0gKZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozdXdNQWY0VEdKMXdFSmRCQ0VMSUFkUE9Jd1g1egpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHBjM2Y2VGk2NGlyZno5SjhJbGpsMTNSZ1pyNWZlCjQrNTJ6VmZlYlFJREFRQUJvMzh3ZlRBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZHcGlWY2RYT0FLdgpzTFlvS0QyMGlQK1o5Z0dHTUI4R0ExVWRJd1FZTUJhQUZDbk9oMXJVaFUreHFSQzBabEIyYzFxajhHZzZNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFBYjVvT1VJdUJpY01jOW1hcTV4TExJY3I1Um5vcUIvbmxxU3krUFpaMHcKa2xrOEMwbzk0Q0FZb2VJRnhSNWlTcTBZdHRNbG5KRnJxSlcyR0ZENzZOeFRVeEtCSUtiL2llNVNMS3J6VVhXNApacG9mKzFHaUx6dnZRMENIYTZRQkIySkhpZjdSN0Y3RFY3b29JM3REdWNvNkZKcHpscmZNTHVoNHdwTkkyaGlOCk93cHk3Qm9TQVNOR2ZSRWRYOHJvZnRlVEF3RVpOM0txUEtianBETlBOS2ZSZzliRzhReElhMkJ6L0NVYkE0Q2oKR2tEZy9uQnNuMmJoejBlU1ZIdzJYL0tjUUhaSUhHSlBJajhjQ2NHK0tvMFhobll1cTEwVzk0cnppUE5JOHZZYwo4ZEVTQ2ZLT0dBdURmQWxRbTQyejUzaktYMWovMCs2dzluSG5ZKytXa3VaYQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdEZFbzkzRDZ5V0srMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUCmhOT21VQUtSYlFLKzdpdWl5VXU3UWN6anpLV3hMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS8KdnZFZGNFbVBibk9RWkcraDFQUEM0SGRyUXY4dENZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLcgpWUXNkYUJVcDZLZDh6aHhUUzhsNkRGbmh1UlNIZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozCnV3TUFmNFRHSjF3RUpkQkNFTElBZFBPSXdYNXpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHAKYzNmNlRpNjRpcmZ6OUo4SWxqbDEzUmdacjVmZTQrNTJ6VmZlYlFJREFRQUJBb0lCQVFDY3UzTDFhWkhEWEg1dgpRM0R6ZTFXS2lLT3NyWU1rdW5NdWI4MkJ4NEQxbmp2TEp2bGVXaTNVbS9iV0h5M2dqYk5JYnZoTVlKQ2RRR1I1ClZ6aXQxR3dHbVVsTFlMbldsTnpYL3J6Y0Z5WEhDdExTN3czb0pXS21TSHBRMGtodTFJMkJNWVJ4WWJ1dEYycUoKUWs4dW5NNWtHdEIzSUtWYzFXd0U0Qkh0cmNvOEovVGxkRVRkM3I0cTlKOHFEbXlQTjFRVUlDZ0VzSkdtV2RoNwplQkNpWEdSck9YR3VEcTFuQjB1ZjlLOEVkVE83MEU5ME9GUzFHdW1kbWhKOEFidGRYN1dRZU8wY25FRGRmRDVwCjZvS09nbXU2Q0xIU09ZZENiWndUUWZjUXlkU0JLUTdWOXpQNk85TFlBSHVDZlRkNVE5STg1elcvN0FrblB3YVkKU0JWMlhMdmhBb0dCQU9nRG1QNk51bTdJY0ZPY1oyT3MrL1AwZ1BoR0Q4djlCY25QSnZ0UHJDMmZsNXVhcmF2TQpSbkhXS2w2Wi9DYll1azljQ0dGdXdNMnBEYm1GakU2UUlxbjNNSXpQRGZ1czBBcFFmNWk1b0duTXh5eVlqTGxICkxzK3R4VDA5Znk0NzYyOENwOVRuRlJENzBxZXdSSDdKS3RXL3FlZldzaEprQzBoQzd1TEsrSDA1QW9HQkFNYjEKWGYvTXhLUHJsOTN1R21yMUlYMTNjend3dXpsWVpyUFdaUVBvdVpsZmIvODdwNTZxRHRjWkwrajlkVEswTU5BQQpERFJXbG1VSzBrYmpKTndXdEdPcTN0ZmFRU1pnV2I0amhHR0Y1cWp0alVMSHNUZDB3OG8yREVESGpwZW5DMG85CktkWWQrRkZ6NDBkVVhwK3RHODNZa25JcnNNeVRUY3BGdllwSVF4N1ZBb0dCQUtxZHlxcVhDdHhnNWNsMm9NazUKOG1ZcURaV0Y0Q1FBUTN0dXJKbnVzdzB4NlVseWEvaUVWZUZzdnVlbWtUajM4N3BjVVlWazdyL09hOXRjREJ2Ugovc3ZDalo5ZXZFZXhnNk95SXNMcTdyNGU2dkV1bFgzQ2pQZ0lMNTJqVloxb1R1L3BvZ1g4a1E5V1FFazBaSXBmCjRQSWk2ZzBsWXZvSFBBeTl1L0pubEdoeEFvR0JBTGhhWjIwOUdnQWhyeWpQRmQrQm9EU1gyRWt2aG13T2c2dWoKdnhvdUxMdjIrTm54TnRJSUZaUXVISHl4VGtWYlBkZWVFN0R6Z292QnlUSXlDdGQ4bWsyMzZLRHQ5V3hQM3hnVgo1UFpRa25oNUZXbUppNll0SmJaYSttT1VCWVowSER3QURLSUFSeldDUWxpM3pxMzZRMGNycEJieWNQSStrOWdYCll4ZWMrY1M1QW9HQUUyN1R0by9MNVpxMFpNOUFkM3ZZSUhsRmM4TWdMU3lRbDhaZENsd25LUWNiVzdNVkYxWGwKV1duWEtMU2k0SEVmcmNxMlpPSnM3UHlMMDJrWXgwV0FDZnp1VmtzQ1BvRUZScUdJMFdkWnBPaXhkTWdlZDYrYgpDSnd3Rzd6MFFTOHg5aWhhWllpdUY2VWhFME8zNjFRSVhqMUVZb1pWQzU3Uy82OTR6cVpDNzhNPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
[root@k8s-master1 k8s-work]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=kube.config
Context "kubernetes" created.
[root@k8s-master1 k8s-work]# cat kube.config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVTzFVN3NvbE1URUxNb2lBU2VyNlV0RlFDbjZzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQXhNVGd3TUZvWERUTTBNREV3TVRBeE1UZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RURBTwpCZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcjJxTGlPc0NpK2RZbnEwK1lha2EKUHZoZTdFMFY0c3BUd1ZkMWNSVGU3eTY1ekdXY2tKOXVCKzNKWjl3bGhkL3d5Y0NuaTN0d2RoVVh2bW1RRTdJcwphVkJhenpzRjFGcDE0MFdkbjVZcGZEd0V4OWR3QitaVE83SnZncXFBUTlnTlRyaGZ3UkpVeUVIZDcvaUY2NE5RCkhlTTJmQm1QUHpXeWwvdkc1bVB3UVBvL1krcnMxckZ6c2JVRG1EYmJ5enQxNjZXblVlVWozYzB3aFp1Y2hOb1oKSDFwbjZXTXFIdWxhM2FuaElQWGJVa0VXK1FVVzZLU1FnVm1MVzZZbVlEZGROYisxWkhxb1FKQUI5YUNRRlgrZgpnTXQzSnBIQXJPY3hWTG1leStMYzQ5RGhtQVZDZVcxQkZOeEhpTVZoZGo1czljWjQyUlgvV1diWlZpWjVGeGsyCldRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVLYzZIV3RTRlQ3R3BFTFJtVUhaeldxUHdhRG93RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUYxZQpUY21oZEdzcUI5YmwxVjFVeE0wczg4dm05VjJ1dnhIOEhEVmtIdjJ4eUxucEFwVWpHQldPZ3JRdSsyMXErdE1lCkVNa2hRUzlFOGRQTkQzSkgxbmdob3lyN0dad3k0SFVmQkxoOXdNTVNHd3plaWJJd3lXRUtQMUlBbmlIbmFxUW4KTDBUMlVzZk81c0x6MXRiRHlKWXZQbXA4aHVJTkRCdFUrSmtGY3huM21lanluSFpVeEJEZExURUtqWlNib1VzVAo2NXArMzNzalhlc2wwQVZEMlg0UUo5NytWa0I4ZXNFcnZQaUVRdmJyWG9OQTVROVNZNk5xZ1JtUGo4VjNZTEdSCldXVjB3SGphT0dCUWx1OUlqbnBIWGs2U2JmTmpCd25vS01iWEFOcWVUWDdNSXgwOWd6QmphUTJWWUlnSFh3VloKbGY3azIxMDBkR0JNajRMd3hwND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://192.168.198.100:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: admin
name: kubernetes
current-context: ""
kind: Config
preferences: {}
users:
- name: admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzVENDQXNXZ0F3SUJBZ0lVWUpLNkpBSXgvMlRXZFR0cGhOejd4WmxscVBFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQTNNamt3TUZvWERUTTBNREV3TVRBM01qa3dNRm93YXpFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVE4d0RRWURWUVFMRXdaemVYTjBaVzB4RGpBTUJnTlZCQU1UCkJXRmtiV2x1TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF0RkVvOTNENnlXSysKMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUaE5PbVVBS1JiUUsrN2l1aXlVdTdRY3pqektXeApMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS92dkVkY0VtUGJuT1FaRytoMVBQQzRIZHJRdjh0CkNZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLclZRc2RhQlVwNktkOHpoeFRTOGw2REZuaHVSU0gKZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozdXdNQWY0VEdKMXdFSmRCQ0VMSUFkUE9Jd1g1egpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHBjM2Y2VGk2NGlyZno5SjhJbGpsMTNSZ1pyNWZlCjQrNTJ6VmZlYlFJREFRQUJvMzh3ZlRBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZHcGlWY2RYT0FLdgpzTFlvS0QyMGlQK1o5Z0dHTUI4R0ExVWRJd1FZTUJhQUZDbk9oMXJVaFUreHFSQzBabEIyYzFxajhHZzZNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFBYjVvT1VJdUJpY01jOW1hcTV4TExJY3I1Um5vcUIvbmxxU3krUFpaMHcKa2xrOEMwbzk0Q0FZb2VJRnhSNWlTcTBZdHRNbG5KRnJxSlcyR0ZENzZOeFRVeEtCSUtiL2llNVNMS3J6VVhXNApacG9mKzFHaUx6dnZRMENIYTZRQkIySkhpZjdSN0Y3RFY3b29JM3REdWNvNkZKcHpscmZNTHVoNHdwTkkyaGlOCk93cHk3Qm9TQVNOR2ZSRWRYOHJvZnRlVEF3RVpOM0txUEtianBETlBOS2ZSZzliRzhReElhMkJ6L0NVYkE0Q2oKR2tEZy9uQnNuMmJoejBlU1ZIdzJYL0tjUUhaSUhHSlBJajhjQ2NHK0tvMFhobll1cTEwVzk0cnppUE5JOHZZYwo4ZEVTQ2ZLT0dBdURmQWxRbTQyejUzaktYMWovMCs2dzluSG5ZKytXa3VaYQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdEZFbzkzRDZ5V0srMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUCmhOT21VQUtSYlFLKzdpdWl5VXU3UWN6anpLV3hMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS8KdnZFZGNFbVBibk9RWkcraDFQUEM0SGRyUXY4dENZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLcgpWUXNkYUJVcDZLZDh6aHhUUzhsNkRGbmh1UlNIZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozCnV3TUFmNFRHSjF3RUpkQkNFTElBZFBPSXdYNXpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHAKYzNmNlRpNjRpcmZ6OUo4SWxqbDEzUmdacjVmZTQrNTJ6VmZlYlFJREFRQUJBb0lCQVFDY3UzTDFhWkhEWEg1dgpRM0R6ZTFXS2lLT3NyWU1rdW5NdWI4MkJ4NEQxbmp2TEp2bGVXaTNVbS9iV0h5M2dqYk5JYnZoTVlKQ2RRR1I1ClZ6aXQxR3dHbVVsTFlMbldsTnpYL3J6Y0Z5WEhDdExTN3czb0pXS21TSHBRMGtodTFJMkJNWVJ4WWJ1dEYycUoKUWs4dW5NNWtHdEIzSUtWYzFXd0U0Qkh0cmNvOEovVGxkRVRkM3I0cTlKOHFEbXlQTjFRVUlDZ0VzSkdtV2RoNwplQkNpWEdSck9YR3VEcTFuQjB1ZjlLOEVkVE83MEU5ME9GUzFHdW1kbWhKOEFidGRYN1dRZU8wY25FRGRmRDVwCjZvS09nbXU2Q0xIU09ZZENiWndUUWZjUXlkU0JLUTdWOXpQNk85TFlBSHVDZlRkNVE5STg1elcvN0FrblB3YVkKU0JWMlhMdmhBb0dCQU9nRG1QNk51bTdJY0ZPY1oyT3MrL1AwZ1BoR0Q4djlCY25QSnZ0UHJDMmZsNXVhcmF2TQpSbkhXS2w2Wi9DYll1azljQ0dGdXdNMnBEYm1GakU2UUlxbjNNSXpQRGZ1czBBcFFmNWk1b0duTXh5eVlqTGxICkxzK3R4VDA5Znk0NzYyOENwOVRuRlJENzBxZXdSSDdKS3RXL3FlZldzaEprQzBoQzd1TEsrSDA1QW9HQkFNYjEKWGYvTXhLUHJsOTN1R21yMUlYMTNjend3dXpsWVpyUFdaUVBvdVpsZmIvODdwNTZxRHRjWkwrajlkVEswTU5BQQpERFJXbG1VSzBrYmpKTndXdEdPcTN0ZmFRU1pnV2I0amhHR0Y1cWp0alVMSHNUZDB3OG8yREVESGpwZW5DMG85CktkWWQrRkZ6NDBkVVhwK3RHODNZa25JcnNNeVRUY3BGdllwSVF4N1ZBb0dCQUtxZHlxcVhDdHhnNWNsMm9NazUKOG1ZcURaV0Y0Q1FBUTN0dXJKbnVzdzB4NlVseWEvaUVWZUZzdnVlbWtUajM4N3BjVVlWazdyL09hOXRjREJ2Ugovc3ZDalo5ZXZFZXhnNk95SXNMcTdyNGU2dkV1bFgzQ2pQZ0lMNTJqVloxb1R1L3BvZ1g4a1E5V1FFazBaSXBmCjRQSWk2ZzBsWXZvSFBBeTl1L0pubEdoeEFvR0JBTGhhWjIwOUdnQWhyeWpQRmQrQm9EU1gyRWt2aG13T2c2dWoKdnhvdUxMdjIrTm54TnRJSUZaUXVISHl4VGtWYlBkZWVFN0R6Z292QnlUSXlDdGQ4bWsyMzZLRHQ5V3hQM3hnVgo1UFpRa25oNUZXbUppNll0SmJaYSttT1VCWVowSER3QURLSUFSeldDUWxpM3pxMzZRMGNycEJieWNQSStrOWdYCll4ZWMrY1M1QW9HQUUyN1R0by9MNVpxMFpNOUFkM3ZZSUhsRmM4TWdMU3lRbDhaZENsd25LUWNiVzdNVkYxWGwKV1duWEtMU2k0SEVmcmNxMlpPSnM3UHlMMDJrWXgwV0FDZnp1VmtzQ1BvRUZScUdJMFdkWnBPaXhkTWdlZDYrYgpDSnd3Rzd6MFFTOHg5aWhhWllpdUY2VWhFME8zNjFRSVhqMUVZb1pWQzU3Uy82OTR6cVpDNzhNPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
[root@k8s-master1 k8s-work]# kubectl config use-context kubernetes --kubeconfig=kube.config
Switched to context "kubernetes".
[root@k8s-master1 k8s-work]# cat kube.config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURtakNDQW9LZ0F3SUJBZ0lVTzFVN3NvbE1URUxNb2lBU2VyNlV0RlFDbjZzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQXhNVGd3TUZvWERUTTBNREV3TVRBeE1UZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RURBTwpCZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcjJxTGlPc0NpK2RZbnEwK1lha2EKUHZoZTdFMFY0c3BUd1ZkMWNSVGU3eTY1ekdXY2tKOXVCKzNKWjl3bGhkL3d5Y0NuaTN0d2RoVVh2bW1RRTdJcwphVkJhenpzRjFGcDE0MFdkbjVZcGZEd0V4OWR3QitaVE83SnZncXFBUTlnTlRyaGZ3UkpVeUVIZDcvaUY2NE5RCkhlTTJmQm1QUHpXeWwvdkc1bVB3UVBvL1krcnMxckZ6c2JVRG1EYmJ5enQxNjZXblVlVWozYzB3aFp1Y2hOb1oKSDFwbjZXTXFIdWxhM2FuaElQWGJVa0VXK1FVVzZLU1FnVm1MVzZZbVlEZGROYisxWkhxb1FKQUI5YUNRRlgrZgpnTXQzSnBIQXJPY3hWTG1leStMYzQ5RGhtQVZDZVcxQkZOeEhpTVZoZGo1czljWjQyUlgvV1diWlZpWjVGeGsyCldRSURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlYKSFE0RUZnUVVLYzZIV3RTRlQ3R3BFTFJtVUhaeldxUHdhRG93RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUYxZQpUY21oZEdzcUI5YmwxVjFVeE0wczg4dm05VjJ1dnhIOEhEVmtIdjJ4eUxucEFwVWpHQldPZ3JRdSsyMXErdE1lCkVNa2hRUzlFOGRQTkQzSkgxbmdob3lyN0dad3k0SFVmQkxoOXdNTVNHd3plaWJJd3lXRUtQMUlBbmlIbmFxUW4KTDBUMlVzZk81c0x6MXRiRHlKWXZQbXA4aHVJTkRCdFUrSmtGY3huM21lanluSFpVeEJEZExURUtqWlNib1VzVAo2NXArMzNzalhlc2wwQVZEMlg0UUo5NytWa0I4ZXNFcnZQaUVRdmJyWG9OQTVROVNZNk5xZ1JtUGo4VjNZTEdSCldXVjB3SGphT0dCUWx1OUlqbnBIWGs2U2JmTmpCd25vS01iWEFOcWVUWDdNSXgwOWd6QmphUTJWWUlnSFh3VloKbGY3azIxMDBkR0JNajRMd3hwND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://192.168.198.100:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: admin
name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzVENDQXNXZ0F3SUJBZ0lVWUpLNkpBSXgvMlRXZFR0cGhOejd4WmxscVBFd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEVEQU9CZ05WQkFvVEIydDFZbVZ0YzJJeEN6QUpCZ05WQkFzVEFrTk9NUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEkwTURFd05EQTNNamt3TUZvWERUTTBNREV3TVRBM01qa3dNRm93YXpFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVE4d0RRWURWUVFMRXdaemVYTjBaVzB4RGpBTUJnTlZCQU1UCkJXRmtiV2x1TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF0RkVvOTNENnlXSysKMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUaE5PbVVBS1JiUUsrN2l1aXlVdTdRY3pqektXeApMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS92dkVkY0VtUGJuT1FaRytoMVBQQzRIZHJRdjh0CkNZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLclZRc2RhQlVwNktkOHpoeFRTOGw2REZuaHVSU0gKZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozdXdNQWY0VEdKMXdFSmRCQ0VMSUFkUE9Jd1g1egpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHBjM2Y2VGk2NGlyZno5SjhJbGpsMTNSZ1pyNWZlCjQrNTJ6VmZlYlFJREFRQUJvMzh3ZlRBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZHcGlWY2RYT0FLdgpzTFlvS0QyMGlQK1o5Z0dHTUI4R0ExVWRJd1FZTUJhQUZDbk9oMXJVaFUreHFSQzBabEIyYzFxajhHZzZNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFBYjVvT1VJdUJpY01jOW1hcTV4TExJY3I1Um5vcUIvbmxxU3krUFpaMHcKa2xrOEMwbzk0Q0FZb2VJRnhSNWlTcTBZdHRNbG5KRnJxSlcyR0ZENzZOeFRVeEtCSUtiL2llNVNMS3J6VVhXNApacG9mKzFHaUx6dnZRMENIYTZRQkIySkhpZjdSN0Y3RFY3b29JM3REdWNvNkZKcHpscmZNTHVoNHdwTkkyaGlOCk93cHk3Qm9TQVNOR2ZSRWRYOHJvZnRlVEF3RVpOM0txUEtianBETlBOS2ZSZzliRzhReElhMkJ6L0NVYkE0Q2oKR2tEZy9uQnNuMmJoejBlU1ZIdzJYL0tjUUhaSUhHSlBJajhjQ2NHK0tvMFhobll1cTEwVzk0cnppUE5JOHZZYwo4ZEVTQ2ZLT0dBdURmQWxRbTQyejUzaktYMWovMCs2dzluSG5ZKytXa3VaYQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdEZFbzkzRDZ5V0srMXpDRnhhMmwxUUNaUURxWnphWEU4YW9EQktEOHFES0V6YllUCmhOT21VQUtSYlFLKzdpdWl5VXU3UWN6anpLV3hMSnhHSlpvRms3N29tbENMOVNlWURJY0EwM3l4cnlOTXR6eS8KdnZFZGNFbVBibk9RWkcraDFQUEM0SGRyUXY4dENZdG1wcnhpSzdiRXZ4dDVWTEZXS2lDRnFMZHRHL3VGd0VLcgpWUXNkYUJVcDZLZDh6aHhUUzhsNkRGbmh1UlNIZkVSK1cyeC8xUXF4Ri8zdkx2dzR0QzFkMnYwSFBqODB5aFozCnV3TUFmNFRHSjF3RUpkQkNFTElBZFBPSXdYNXpCTW44R0dPdkh1T2hmaGFOZ0pLUCtKbEUrZmlkMnFYZUoyNHAKYzNmNlRpNjRpcmZ6OUo4SWxqbDEzUmdacjVmZTQrNTJ6VmZlYlFJREFRQUJBb0lCQVFDY3UzTDFhWkhEWEg1dgpRM0R6ZTFXS2lLT3NyWU1rdW5NdWI4MkJ4NEQxbmp2TEp2bGVXaTNVbS9iV0h5M2dqYk5JYnZoTVlKQ2RRR1I1ClZ6aXQxR3dHbVVsTFlMbldsTnpYL3J6Y0Z5WEhDdExTN3czb0pXS21TSHBRMGtodTFJMkJNWVJ4WWJ1dEYycUoKUWs4dW5NNWtHdEIzSUtWYzFXd0U0Qkh0cmNvOEovVGxkRVRkM3I0cTlKOHFEbXlQTjFRVUlDZ0VzSkdtV2RoNwplQkNpWEdSck9YR3VEcTFuQjB1ZjlLOEVkVE83MEU5ME9GUzFHdW1kbWhKOEFidGRYN1dRZU8wY25FRGRmRDVwCjZvS09nbXU2Q0xIU09ZZENiWndUUWZjUXlkU0JLUTdWOXpQNk85TFlBSHVDZlRkNVE5STg1elcvN0FrblB3YVkKU0JWMlhMdmhBb0dCQU9nRG1QNk51bTdJY0ZPY1oyT3MrL1AwZ1BoR0Q4djlCY25QSnZ0UHJDMmZsNXVhcmF2TQpSbkhXS2w2Wi9DYll1azljQ0dGdXdNMnBEYm1GakU2UUlxbjNNSXpQRGZ1czBBcFFmNWk1b0duTXh5eVlqTGxICkxzK3R4VDA5Znk0NzYyOENwOVRuRlJENzBxZXdSSDdKS3RXL3FlZldzaEprQzBoQzd1TEsrSDA1QW9HQkFNYjEKWGYvTXhLUHJsOTN1R21yMUlYMTNjend3dXpsWVpyUFdaUVBvdVpsZmIvODdwNTZxRHRjWkwrajlkVEswTU5BQQpERFJXbG1VSzBrYmpKTndXdEdPcTN0ZmFRU1pnV2I0amhHR0Y1cWp0alVMSHNUZDB3OG8yREVESGpwZW5DMG85CktkWWQrRkZ6NDBkVVhwK3RHODNZa25JcnNNeVRUY3BGdllwSVF4N1ZBb0dCQUtxZHlxcVhDdHhnNWNsMm9NazUKOG1ZcURaV0Y0Q1FBUTN0dXJKbnVzdzB4NlVseWEvaUVWZUZzdnVlbWtUajM4N3BjVVlWazdyL09hOXRjREJ2Ugovc3ZDalo5ZXZFZXhnNk95SXNMcTdyNGU2dkV1bFgzQ2pQZ0lMNTJqVloxb1R1L3BvZ1g4a1E5V1FFazBaSXBmCjRQSWk2ZzBsWXZvSFBBeTl1L0pubEdoeEFvR0JBTGhhWjIwOUdnQWhyeWpQRmQrQm9EU1gyRWt2aG13T2c2dWoKdnhvdUxMdjIrTm54TnRJSUZaUXVISHl4VGtWYlBkZWVFN0R6Z292QnlUSXlDdGQ4bWsyMzZLRHQ5V3hQM3hnVgo1UFpRa25oNUZXbUppNll0SmJaYSttT1VCWVowSER3QURLSUFSeldDUWxpM3pxMzZRMGNycEJieWNQSStrOWdYCll4ZWMrY1M1QW9HQUUyN1R0by9MNVpxMFpNOUFkM3ZZSUhsRmM4TWdMU3lRbDhaZENsd25LUWNiVzdNVkYxWGwKV1duWEtMU2k0SEVmcmNxMlpPSnM3UHlMMDJrWXgwV0FDZnp1VmtzQ1BvRUZScUdJMFdkWnBPaXhkTWdlZDYrYgpDSnd3Rzd6MFFTOHg5aWhhWllpdUY2VWhFME8zNjFRSVhqMUVZb1pWQzU3Uy82OTR6cVpDNzhNPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

 

2.5.6.5 Put the kubectl config in place and bind the cluster role

mkdir ~/.kube
cp kube.config ~/.kube/config
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config

 

2.5.6.6 Check cluster status

#View cluster info
kubectl cluster-info

#View component status
kubectl get componentstatuses

#View resources in all namespaces
kubectl get all --all-namespaces

2.5.6.7 Sync the kubectl config to the other master nodes

#On k8s-master2, create the directory
mkdir /root/.kube

#On k8s-master3, create the directory
mkdir /root/.kube

#Copy the config over from k8s-master1
scp /root/.kube/config k8s-master2:/root/.kube/config
scp /root/.kube/config k8s-master3:/root/.kube/config

 

2.5.7 Deploy kube-controller-manager

2.5.7.1 Create the kube-controller-manager certificate request

cat > kube-controller-manager-csr.json << "EOF"
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "192.168.198.144",
    "192.168.198.145",
    "192.168.198.146"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-controller-manager",
      "OU": "system"
    }
  ]
}
EOF

2.5.7.2 Generate the kube-controller-manager certificate

This produces kube-controller-manager.csr, kube-controller-manager-key.pem and kube-controller-manager.pem (alongside the kube-controller-manager-csr.json request file).

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

 

2.5.7.3 Create kube-controller-manager.kubeconfig

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.198.100:6443 --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager --client-certificate=kube-controller-manager.pem --client-key=kube-controller-manager-key.pem --embed-certs=true --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

2.5.7.4 Create the kube-controller-manager configuration

cat > kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS=" \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.96.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--root-ca-file=/etc/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--v=2"

EOF

2.5.7.5 Create the systemd unit

cat > kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/kubernetes/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

2.5.7.6 Sync files to the other master nodes

cp kube-controller-manager*.pem /etc/kubernetes/ssl/
cp kube-controller-manager.kubeconfig /etc/kubernetes/
cp kube-controller-manager.conf /etc/kubernetes/
cp kube-controller-manager.service /usr/lib/systemd/system/
scp  kube-controller-manager*.pem k8s-master2:/etc/kubernetes/ssl/
scp kube-controller-manager*.pem k8s-master3:/etc/kubernetes/ssl/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master2:/etc/kubernetes/
scp kube-controller-manager.kubeconfig kube-controller-manager.conf k8s-master3:/etc/kubernetes/
scp kube-controller-manager.service k8s-master2:/usr/lib/systemd/system/
scp kube-controller-manager.service k8s-master3:/usr/lib/systemd/system/
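The start step is not shown in the original; by analogy with the apiserver above and kube-scheduler below (and since kubectl get cs later reports controller-manager healthy), it would be started the same way on the master nodes:

systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager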

 

2.5.8 Deploy kube-scheduler

2.5.8.1 Create the kube-scheduler certificate request

cat > kube-scheduler-csr.json << "EOF"
{
  "CN": "system:kube-scheduler",
  "hosts": [
    "127.0.0.1",
    "192.168.198.144",
    "192.168.198.145",
    "192.168.198.146"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:kube-scheduler",
      "OU": "system"
    }
  ]
}
EOF

2.5.8.2 Generate the kube-scheduler certificate

This produces kube-scheduler.csr, kube-scheduler-key.pem and kube-scheduler.pem (alongside the kube-scheduler-csr.json request file).

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler

2.5.8.3 Create the kube-scheduler kubeconfig

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.198.100:6443 --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler --client-certificate=kube-scheduler.pem --client-key=kube-scheduler-key.pem --embed-certs=true --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

2.5.8.4 Create the service configuration

cat > kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS=" \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
--leader-elect=true \
--v=2"

EOF

2.5.8.5 Create the systemd unit

cat > kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

2.5.8.6 Sync files to the other master nodes

cp kube-scheduler*.pem /etc/kubernetes/ssl/
cp kube-scheduler.kubeconfig /etc/kubernetes/
cp kube-scheduler.conf /etc/kubernetes/
cp kube-scheduler.service /usr/lib/systemd/system/
scp  kube-scheduler*.pem k8s-master2:/etc/kubernetes/ssl/
scp kube-scheduler*.pem k8s-master3:/etc/kubernetes/ssl/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master2:/etc/kubernetes/
scp kube-scheduler.kubeconfig kube-scheduler.conf k8s-master3:/etc/kubernetes/
scp kube-scheduler.service k8s-master2:/usr/lib/systemd/system/
scp kube-scheduler.service k8s-master3:/usr/lib/systemd/system/

2.5.8.7 Start the service

systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler

[root@k8s-master1 kubernetes]# systemctl status kube-scheduler
● kube-scheduler.service - Kubernetes Scheduler
Loaded: loaded (/usr/lib/systemd/system/kube-scheduler.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2024-01-04 16:45:18 CST; 1min 9s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 6131 (kube-scheduler)
CGroup: /system.slice/kube-scheduler.service
└─6131 /usr/local/bin/kube-scheduler --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig --leader-elect=true --v=2

Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: schedulerName: default-scheduler
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: >
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.437013 6131 server.go:154] "Starting Kubernetes Scheduler" version="v1.28.0"
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.437027 6131 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.438927 6131 tlsconfig.go:200] "Loaded serving cert" certName="Generated self signed cert" cer...
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439276 6131 named_certificates.go:53] "Loaded SNI cert" index=0 certName="self-signed loopbac...
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439311 6131 secure_serving.go:210] Serving securely on [::]:10259
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.439359 6131 tlsconfig.go:240] "Starting DynamicServingCertificateController"
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.540853 6131 leaderelection.go:250] attempting to acquire leader lease kube-system/ku...eduler...
Jan 04 16:45:19 k8s-master1 kube-scheduler[6131]: I0104 16:45:19.555700 6131 leaderelection.go:260] successfully acquired lease kube-system/kube-scheduler
Hint: Some lines were ellipsized, use -l to show in full.

 

[root@k8s-master1 kubernetes]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy ok
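
Since v1 ComponentStatus is deprecated, the scheduler can also be probed directly on its secure port (10259); a minimal check, assuming the default always-allow health paths (-k skips verification of the self-signed serving certificate):

curl -sk https://127.0.0.1:10259/healthz
# expected output: ok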

 

2.5.9 Worker node deployment

2.5.9.1 Deploy the container runtime (Docker)

2.5.9.1.1 Install Docker
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo 
yum -y install docker-ce
systemctl enable --now docker
2.5.9.1.2 Modify the Docker configuration
cat << EOF | sudo tee /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s-node1 docker]# systemctl restart docker
[root@k8s-node1 docker]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@k8s-node1 docker]# docker version
Client: Docker Engine - Community
Version: 24.0.7
API version: 1.43
Go version: go1.20.10
Git commit: afdd53b
Built: Thu Oct 26 09:11:35 2023
OS/Arch: linux/amd64
Context: default

Server: Docker Engine - Community
Engine:
Version: 24.0.7
API version: 1.43 (minimum version 1.12)
Go version: go1.20.10
Git commit: 311b9ff
Built: Thu Oct 26 09:10:36 2023
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: 1.6.26
GitCommit: 3dd1e886e55dd695541fdcd67420c2888645a495
runc:
Version: 1.1.10
GitCommit: v1.1.10-0-g18a0cb0
docker-init:
Version: 0.19.0
GitCommit: de40ad0
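
To confirm the systemd cgroup driver actually took effect after the restart, query docker info with a Go template:

docker info --format '{{.CgroupDriver}}'
# expected output: systemd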

 

2.5.9.1.3 Install cri-dockerd
cri-dockerd is a shim that exposes the CRI interface on top of Docker; since the dockershim removal in v1.24, kubelet speaks only CRI, so it needs cri-dockerd to drive Docker.

 

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9-3.el7.x86_64.rpm
yum install -y cri-dockerd-0.3.9-3.el7.x86_64.rpm
vi /usr/lib/systemd/system/cri-docker.service

#Modify line 10: the default pause (pod infra) image version is too old, so pin it to 3.9; using the Aliyun registry makes image pulls much faster inside China
ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://
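
If you prefer a non-interactive edit, a one-liner sketch with GNU sed (it rewrites the unit's ExecStart line wholesale, assuming the stock layout of the packaged unit file):

sed -i 's#^ExecStart=.*#ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://#' /usr/lib/systemd/system/cri-docker.service
systemctl daemon-reload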

[root@k8s-node2 ~]# systemctl enable --now cri-docker
Created symlink from /etc/systemd/system/multi-user.target.wants/cri-docker.service to /usr/lib/systemd/system/cri-docker.service.

[root@k8s-node2 ~]# systemctl status cri-docker
● cri-docker.service - CRI Interface for Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/cri-docker.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2024-01-05 08:29:57 CST; 3s ago
Docs: https://docs.mirantis.com
Main PID: 1821 (cri-dockerd)
Tasks: 7
Memory: 13.8M
CGroup: /system.slice/cri-docker.service
└─1821 /usr/bin/cri-dockerd --pod-infra-container-image=registry.k8s.io/pause:3.9 --container-runtime-endpoint fd://

Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Start docker client with request timeout 0s"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Hairpin mode is set to none"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Loaded network plugin cni"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Docker cri networking managed by network plugin cni"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Setting cgroupDriver systemd"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkC...idr:,},}"
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Starting the GRPC backend for the Docker CRI interface."
Jan 05 08:29:57 k8s-node2 cri-dockerd[1821]: time="2024-01-05T08:29:57+08:00" level=info msg="Start cri-dockerd grpc backend"
Jan 05 08:29:57 k8s-node2 systemd[1]: Started CRI Interface for Docker Application Container Engine.
Hint: Some lines were ellipsized, use -l to show in full.
#The run directory now contains cri-dockerd.sock; this is the socket kubelet will later use to call Docker
[root@k8s-node1 run]# ll /run/cri-dockerd.sock
srw-rw---- 1 root docker 0 Jan 5 08:33 /run/cri-dockerd.sock

2.5.9.2 Deploy kubelet

Run on k8s-master1

2.5.9.2.1 Create kubelet-bootstrap.kubeconfig
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/token.csv)

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.198.100:6443 --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig

kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
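
For reference, token.csv was generated during the apiserver setup and holds one CSV line of token,user,uid,groups; the awk above extracts the first field. An illustrative line (the token value here is made up, not from this cluster):

# token,user,uid,groups
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:kubelet-bootstrap"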
2.5.9.2.2 Create the kubelet configuration file

Run on the worker nodes

mkdir -p /etc/kubernetes/ssl

Configuration file for k8s-node1

cat > /etc/kubernetes/kubelet.json << "EOF"
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.198.147",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF

Configuration file for k8s-node2

 

cat > /etc/kubernetes/kubelet.json << "EOF"
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.198.148",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF
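
Before starting kubelet it is worth validating the JSON; a quick syntax check, assuming the stock python on CentOS 7:

python -m json.tool /etc/kubernetes/kubelet.json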

 

2.5.9.2.3 Create the kubelet systemd service unit
#Create the kubelet working directory on the worker nodes
[root@k8s-node2 kubernetes]# mkdir /var/lib/kubelet
cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
--cert-dir=/etc/kubernetes/ssl \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--config=/etc/kubernetes/kubelet.json \
--container-runtime-endpoint=unix:///run/cri-dockerd.sock \
--rotate-certificates \
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 \
--v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
2.5.9.2.4 Sync files to the cluster nodes

Sync the kubelet-bootstrap.kubeconfig and ca.pem generated on k8s-master1 to the worker nodes

[root@k8s-master1 k8s-work]# for i in k8s-node1 k8s-node2;do scp kubelet-bootstrap.kubeconfig $i:/etc/kubernetes/;done
kubelet-bootstrap.kubeconfig 100% 2105 1.4MB/s 00:00
kubelet-bootstrap.kubeconfig 100% 2105 1.4MB/s 00:00
[root@k8s-master1 k8s-work]# for i in k8s-node1 k8s-node2;do scp ca.pem $i:/etc/kubernetes/ssl;done
ca.pem 100% 1310 808.3KB/s 00:00
ca.pem 100% 1310 716.6KB/s 00:00

Distribute the binaries to the worker nodes. Note that the transcript below copies kube-scheduler, which workers do not run; the binary the next section actually needs is kube-proxy — see the fix-up sketch after the transcript.

[root@k8s-master1 bin]# for i in k8s-node1 k8s-node2;do scp kubelet kube-scheduler $i:/usr/local/bin/;done
kubelet 100% 106MB 16.3MB/s 00:06
kube-scheduler 100% 53MB 12.7MB/s 00:04
kubelet 100% 106MB 28.6MB/s 00:03
kube-scheduler 100% 53MB 42.4MB/s 00:01
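
The worker nodes need the kube-proxy binary for section 2.5.9.3; if it was not distributed above, a minimal fix-up, assuming the extracted binaries are still in the current directory:

for i in k8s-node1 k8s-node2;do scp kube-proxy $i:/usr/local/bin/;done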

 

2.5.9.2.5 Start the service
systemctl daemon-reload
systemctl enable --now kubelet

[root@k8s-node1 system]# systemctl status kubelet
● kubelet.service - Kubernetes Kubelet
Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2024-01-05 09:21:20 CST; 3min 40s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 6177 (kubelet)
CGroup: /system.slice/kubelet.service
└─6177 /usr/local/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig --cert-dir=/etc/kubernetes/ssl --kubeconfig=/etc/kub...

Jan 05 09:24:11 k8s-node1 kubelet[6177]: E0105 09:24:11.632795 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:16 k8s-node1 kubelet[6177]: E0105 09:24:16.633986 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:21 k8s-node1 kubelet[6177]: E0105 09:24:21.713576 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:26 k8s-node1 kubelet[6177]: E0105 09:24:26.714288 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:31 k8s-node1 kubelet[6177]: E0105 09:24:31.715295 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:36 k8s-node1 kubelet[6177]: E0105 09:24:36.717562 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:41 k8s-node1 kubelet[6177]: E0105 09:24:41.718346 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:46 k8s-node1 kubelet[6177]: E0105 09:24:46.719040 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:51 k8s-node1 kubelet[6177]: E0105 09:24:51.721244 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Jan 05 09:24:56 k8s-node1 kubelet[6177]: E0105 09:24:56.722712 6177 kubelet.go:2855] "Container runtime network not ready" networkReady="NetworkRead...itialized"
Hint: Some lines were ellipsized, use -l to show in full.
#The "Container runtime network not ready" messages above are expected until the Calico CNI plugin is deployed (section 2.5.10)
#Both worker nodes have joined the cluster. kubelet is not installed on the masters, which serve purely as management nodes, so kubectl get nodes does not list them
[root@k8s-master1 bin]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-node1 NotReady <none> 3m59s v1.28.0
k8s-node2 NotReady <none> 43s v1.28.0
#After kubelet starts, the kubeconfig file kubelet.kubeconfig is generated automatically under /etc/kubernetes, and the ssl directory gains the auto-issued certificates kubelet-client-2024-01-05-09-21-21.pem, kubelet.crt, and kubelet.key
#If a node later misbehaves and its certificate must be re-issued, delete the certificates under ssl first; otherwise the node will fail to rejoin the cluster
[root@k8s-node1 kubernetes]# ll
total 12
-rw------- 1 root root 2105 Jan 5 09:09 kubelet-bootstrap.kubeconfig
-rw-r--r-- 1 root root 711 Jan 5 08:52 kubelet.json
-rw------- 1 root root 2234 Jan 5 09:21 kubelet.kubeconfig
drwxr-xr-x 2 root root 138 Jan 5 09:21 ssl
[root@k8s-node1 kubernetes]# vi kubelet.kubeconfig
[root@k8s-node1 kubernetes]# cd ssl
[root@k8s-node1 ssl]# ll
total 16
-rw-r--r-- 1 root root 1310 Jan 5 09:09 ca.pem
-rw------- 1 root root 1224 Jan 5 09:21 kubelet-client-2024-01-05-09-21-21.pem
lrwxrwxrwx 1 root root 58 Jan 5 09:21 kubelet-client-current.pem -> /etc/kubernetes/ssl/kubelet-client-2024-01-05-09-21-21.pem
-rw-r--r-- 1 root root 2275 Jan 5 09:21 kubelet.crt
-rw------- 1 root root 1679 Jan 5 09:21 kubelet.key
[root@k8s-node1 ssl]#
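
To inspect the auto-issued client certificate, e.g. its subject and validity window, openssl can read it directly:

openssl x509 -in /etc/kubernetes/ssl/kubelet-client-current.pem -noout -subject -dates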

 

 

2.5.9.3 Deploy kube-proxy

2.5.9.3.1 Create the kube-proxy certificate signing request file

Generate the certificate on k8s-master1

cat > kube-proxy-csr.json << "EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF

 

2.5.9.3.2 Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

 

[root@k8s-master1 k8s-work]# ls kube-proxy*
kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem

2.5.9.3.3 Create the kubeconfig file

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.198.100:6443 --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
2.5.9.3.4 Create the service configuration file

Create on the worker nodes; each node binds its own IP address (same file path, different addresses). A sketch that derives the address automatically follows the two blocks.

k8s-node1:

cat > /etc/kubernetes/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.198.147
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.198.147:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.198.147:10249
mode: "ipvs"
EOF

k8s-node2:

cat > /etc/kubernetes/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.198.148
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.198.148:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.198.148:10249
mode: "ipvs"
EOF
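
Maintaining one copy of this file per node is error-prone; a minimal templating sketch that derives the node IP from the default route (this assumes the default-route interface carries the node's cluster IP, and uses an unquoted EOF so the variable expands):

NODE_IP=$(ip -4 route get 8.8.8.8 | awk '{print $7; exit}')
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ${NODE_IP}
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: ${NODE_IP}:10256
kind: KubeProxyConfiguration
metricsBindAddress: ${NODE_IP}:10249
mode: "ipvs"
EOF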
2.5.9.3.5 Create the systemd service unit

Create on the worker nodes

#Create the kube-proxy working directory, matching WorkingDirectory in the unit file below
mkdir -p /var/lib/kube-proxy
cat >  /usr/lib/systemd/system/kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.yaml \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

2.5.9.3.6 Sync files to the worker node hosts

 

[root@k8s-master1 k8s-work]# ls kube-proxy*
kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.kubeconfig kube-proxy.pem
[root@k8s-master1 k8s-work]# for i in k8s-node1 k8s-node2;do scp kube-proxy.kubeconfig $i:/etc/kubernetes/
> done
kube-proxy.kubeconfig 100% 6211 4.4MB/s 00:00
kube-proxy.kubeconfig 100% 6211 4.0MB/s 00:00
[root@k8s-master1 k8s-work]# for i in k8s-node1 k8s-node2;do scp kube-proxy*pem $i:/etc/kubernetes/ssl; done
kube-proxy-key.pem 100% 1679 1.0MB/s 00:00
kube-proxy.pem 100% 1403 998.3KB/s 00:00
kube-proxy-key.pem 100% 1679 1.4MB/s 00:00
kube-proxy.pem 100% 1403 1.3MB/s 00:00
[root@k8s-master1 k8s-work]#

2.5.9.3.7 Start the service

systemctl daemon-reload
systemctl enable --now kube-proxy

[root@k8s-node1 bin]# systemctl status kube-proxy
● kube-proxy.service - Kubernetes Kube-Proxy Server
Loaded: loaded (/usr/lib/systemd/system/kube-proxy.service; enabled; vendor preset: disabled)
Active: active (running) since Fri 2024-01-05 10:53:12 CST; 38s ago
Docs: https://github.com/kubernetes/kubernetes
Main PID: 11727 (kube-proxy)
Tasks: 5
Memory: 17.1M
CGroup: /system.slice/kube-proxy.service
└─11727 /usr/local/bin/kube-proxy --config=/etc/kubernetes/kube-proxy.yaml --v=2

Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.551675 11727 shared_informer.go:311] Waiting for caches to sync for endpoint slice config
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.552316 11727 config.go:315] "Starting node config controller"
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.552333 11727 shared_informer.go:311] Waiting for caches to sync for node config
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.569095 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master"
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.569432 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master"
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652277 11727 shared_informer.go:318] Caches are synced for endpoint slice config
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652318 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master"
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652328 11727 proxier.go:925] "Not syncing ipvs rules until Services and Endpoints have bee...m master"
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652339 11727 shared_informer.go:318] Caches are synced for service config
Jan 05 10:53:12 k8s-node1 kube-proxy[11727]: I0105 10:53:12.652544 11727 shared_informer.go:318] Caches are synced for node config
Hint: Some lines were ellipsized, use -l to show in full.
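
With kube-proxy in ipvs mode, the IPVS rule table is the ground truth; once Services exist, entries appear here (ipvsadm was installed during host preparation):

ipvsadm -Ln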

 

2.5.10 Deploy the network component: Calico

2.5.10.1 Download

Download the matching YAML files from the official Calico site, then create the resources from the master node:

https://docs.tigera.io/calico/latest/about

Check the version compatibility matrix and pick a Calico release that supports the installed Kubernetes version; we are running v1.28.

 

Choose Calico v3.26.

#Copy the corresponding commands for reference only; do not execute them as-is yet
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml


#Download the files with wget first, verify they are intact, then deploy
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml
wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml

[root@k8s-master1 ~]# wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml
--2024-01-05 13:49:20-- https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.110.133, 185.199.109.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... failed: Connection refused.
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... failed: Connection refused.
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... failed: Connection refused.
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 824 [text/plain]
Saving to: ‘custom-resources.yaml’

100%[==========================================================================================================================>] 824 --.-K/s in 0s

2024-01-05 13:50:26 (63.4 MB/s) - ‘custom-resources.yaml’ saved [824/824]

[root@k8s-master1 ~]# ll *yaml
-rw-r--r-- 1 root root 824 Jan 5 13:50 custom-resources.yaml
-rw-r--r-- 1 root root 1475581 Jan 5 13:50 tigera-operator.yaml

 

2.5.10.2 Modify the file

#custom-resources.yaml defaults the Pod network to 192.168.0.0/16, but our Pod network is 10.244.0.0/16, so edit the file before applying it
Change cidr: 192.168.0.0/16 to cidr: 10.244.0.0/16 (a one-line edit is sketched below)
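
A minimal in-place edit with GNU sed, followed by a check:

sed -i 's#cidr: 192.168.0.0/16#cidr: 10.244.0.0/16#' custom-resources.yaml
grep cidr: custom-resources.yaml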

 

2.5.10.3 Apply the files

Use the kubectl create command here, not kubectl apply -f: apply stores the full manifest in the last-applied-configuration annotation, and tigera-operator.yaml is too large for the annotation size limit, so apply fails with an error.

#Apply tigera-operator.yaml
[root@k8s-master1 ~]# kubectl create -f tigera-operator.yaml
namespace/tigera-operator created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/apiservers.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/imagesets.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/installations.operator.tigera.io created
customresourcedefinition.apiextensions.k8s.io/tigerastatuses.operator.tigera.io created
serviceaccount/tigera-operator created
clusterrole.rbac.authorization.k8s.io/tigera-operator created
clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
deployment.apps/tigera-operator created
[root@k8s-master1 ~]# kubectl get ns
NAME STATUS AGE
default Active 23h
kube-node-lease Active 23h
kube-public Active 23h
kube-system Active 23h
tigera-operator Active 18s
[root@k8s-master1 ~]# kubectl get pod -n tigera-operator
NAME READY STATUS RESTARTS AGE
tigera-operator-7f8cd97876-tdjlq 1/1 Running 0 23s

Once the pods in tigera-operator are all Running, run kubectl create -f custom-resources.yaml

[root@k8s-master1 ~]# kubectl create -f custom-resources.yaml 
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
[root@k8s-master1 ~]# kubectl get ns
NAME STATUS AGE
calico-system Active 52s
default Active 23h
kube-node-lease Active 23h
kube-public Active 23h
kube-system Active 23h
tigera-operator Active 95s
[root@k8s-master1 ~]# kubectl get pod -n calico-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-798969c8c4-dbnct 1/1 Running 0 4m31s
calico-node-742hh 1/1 Running 0 4m32s
calico-node-c5dcj 1/1 Running 0 4m32s
calico-typha-5f789cd78d-zdklj 1/1 Running 0 4m33s
csi-node-driver-86xh4 2/2 Running 0 4m32s
csi-node-driver-b6czj 2/2 Running 0 4m32s
[root@k8s-master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-node1 Ready <none> 4h43m v1.28.0
k8s-node2 Ready <none> 4h39m v1.28.0
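
Since this is an operator-based install, Calico's own status resources give another health view; the tigerastatuses CRD was created above, so the following should report AVAILABLE True once the rollout finishes:

kubectl get tigerastatus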

2.5.11 Deploy CoreDNS

cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
            lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
            max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.10.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF
[root@k8s-master1 ~]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
[root@k8s-master1 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-758895f87b-phqsp 1/1 Running 0 98s 10.244.169.131 k8s-node2 <none> <none>
#Verify that DNS resolution works
[root@k8s-node1 kubernetes]# dig -t a www.baidu.com @10.96.0.2

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.15 <<>> -t a www.baidu.com @10.96.0.2
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 54240
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.baidu.com. IN A

;; ANSWER SECTION:
www.baidu.com. 12 IN CNAME www.a.shifen.com.
www.a.shifen.com. 12 IN CNAME www.wshifen.com.
www.wshifen.com. 12 IN A 103.235.46.40

;; Query time: 74 msec
;; SERVER: 10.96.0.2#53(10.96.0.2)
;; WHEN: Fri Jan 05 15:48:18 CST 2024
;; MSG SIZE rcvd: 161
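
An in-cluster check complements the host-side dig test; a minimal sketch, assuming the busybox:1.28 image can be pulled (the pod is removed automatically on exit):

kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local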

2.5.12 Deploy an application for verification

kubectl create ns my-nginx

kubectl create deploy my-nginx --image=nginx:1.23.0 -n my-nginx --dry-run=client -o yaml >> my-nginx.yaml

kubectl apply -f my-nginx.yaml
kubectl expose deployment my-nginx --port=80 --target-port=80 --type=NodePort -n my-nginx --dry-run=client -o yaml >> nginx-svc.yaml

kubectl apply -f nginx-svc.yaml
[root@k8s-master1 ~]# kubectl apply -f nginx-svc.yaml 
service/my-nginx-svc created
[root@k8s-master1 ~]#
[root@k8s-master1 ~]#
[root@k8s-master1 ~]# kubectl get all -n my-nginx
NAME READY STATUS RESTARTS AGE
pod/my-nginx-556b5ccbb8-d4wcl 1/1 Running 0 24m

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/my-nginx-svc NodePort 10.96.202.184 <none> 80:32724/TCP 8s

NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/my-nginx 1/1 1 1 24m

NAME DESIRED CURRENT READY AGE
replicaset.apps/my-nginx-556b5ccbb8 1 1 1 24m
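
The NodePort shown above (32724 here; yours will differ) is open on every worker node, so the deployment can be verified with a plain HTTP request:

# expect an HTTP/1.1 200 OK response header from nginx
curl -I http://192.168.198.147:32724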

That's it for today — did you pick up these tips?


Link: https://www.cnblogs.com/xmwan/p/17940137

(Copyright belongs to the original author; contact us for removal in case of infringement)

Source: 马哥Linux运维
