
Kubernetes Basics: Deploying k8s 1.28 on Debian

Installing and deploying Kubernetes on Debian 12 with kubeadm.

Prerequisites

Uninstall Docker (if present).

sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras

sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd
sudo rm -rf /etc/docker

Set the system time zone and enable time synchronization.

timedatectl set-timezone Asia/Shanghai

# Install chrony
apt-get install -y chrony

# Switch the time source to Aliyun's NTP server
sed -i "s/pool 2.debian.pool.ntp.org iburst/server ntp.aliyun.com iburst/g" /etc/chrony/chrony.conf

# Restart the chrony service and enable it at boot
systemctl restart chrony
systemctl enable chrony

# Show the time sources chrony is synchronizing with
chronyc sources

Install the IPVS tools.

apt-get install -y ipset ipvsadm 

Disable swap and conflicting services

# Turn off all active swap
swapoff -a

# Comment out swap entries in /etc/fstab so they are not remounted at boot
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# Stop the AppArmor service
systemctl stop apparmor.service

# Disable the AppArmor service
systemctl disable apparmor.service

# Disable the Uncomplicated Firewall (ufw)
ufw disable

# Stop the ufw service
systemctl stop ufw.service

# Disable the ufw service
systemctl disable ufw.service
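Verify that swap is off and both services are down:

free -h                            # the Swap line should read 0B
systemctl is-active apparmor ufw   # both should print "inactive"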

Kernel tuning

See the earlier post "Linux操作系统-内核优化" for the full kernel tuning.
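At a minimum, Kubernetes itself needs the following sysctls; a minimal sketch mirroring the upstream kubeadm prerequisites (the file name is arbitrary):

# Let iptables/IPVS see bridged pod traffic, and allow forwarding between pods
cat > /etc/sysctl.d/99-kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# br_netfilter must be loaded before the bridge sysctls can take effect
modprobe br_netfilter
sysctl --system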

Kernel modules

# Declare the kernel modules to load automatically at boot
cat > /etc/modules-load.d/kubernetes.conf << EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
# nf_conntrack replaced nf_conntrack_ipv4 on kernels >= 4.19 (Debian 12 ships 6.1)
nf_conntrack
ip_tables
EOF

# Add executable permission
chmod a+x /etc/modules-load.d/kubernetes.conf
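The file only takes effect at the next boot; to load the same modules immediately and confirm, a quick sketch:

for mod in br_netfilter ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack ip_tables; do
  modprobe "$mod"
done

# confirm the modules are loaded
lsmod | grep -E 'br_netfilter|ip_vs|nf_conntrack'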

containerd

# The cri-containerd bundle, unlike the plain containerd tarball, also includes runc
wget https://github.com/containerd/containerd/releases/download/v1.7.21/cri-containerd-1.7.21-linux-amd64.tar.gz

tar xf cri-containerd-1.7.21-linux-amd64.tar.gz -C /

# Create the directory that holds the containerd config file
mkdir /etc/containerd

# Generate a default containerd config
containerd config default > /etc/containerd/config.toml

# Swap the sandbox (pause) image for the Aliyun mirror
sed -i 's#registry.k8s.io/pause:3.8#registry.aliyuncs.com/google_containers/pause:3.9#' /etc/containerd/config.toml

# Make the container runtime (containerd + CRI) use the systemd cgroup driver
sed -i '/SystemdCgroup/s/false/true/' /etc/containerd/config.toml

# Optionally relocate the storage root
# mkdir /data1/containerd
# sed -i 's#root = "/var/lib/containerd"#root = "/data1/containerd"#' /etc/containerd/config.toml

systemd unit file

vim /lib/systemd/system/containerd.service

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5

LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
# Enable and immediately start the containerd service
systemctl enable --now containerd.service

# Check the status of the containerd service
systemctl status containerd.service

# Check the containerd, crictl, and runc versions
containerd --version
crictl --version
runc --version

# Point crictl at the containerd socket
crictl config runtime-endpoint unix:///run/containerd/containerd.sock

Registry mirrors

In /etc/containerd/config.toml, set config_path = "/etc/containerd/certs.d" (under the [plugins."io.containerd.grpc.v1.cri".registry] section).
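In the default containerd 1.7 config that value is empty, so a one-line sed works (a sketch; edit the file by hand if your config differs), followed by a restart:

sed -i 's#config_path = ""#config_path = "/etc/containerd/certs.d"#' /etc/containerd/config.toml
systemctl restart containerd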

Multiple mirrors can be configured in hosts.toml; when pulling an image, containerd tries the mirrors in the order they are listed and only falls back to the next one when the previous one fails.

# Docker Hub mirrors
mkdir -p /etc/containerd/certs.d/docker.io
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://dockerproxy.com"]
  capabilities = ["pull", "resolve"]

[host."https://docker.m.daocloud.io"]
  capabilities = ["pull", "resolve"]

[host."https://reg-mirror.qiniu.com"]
  capabilities = ["pull", "resolve"]

[host."https://registry.docker-cn.com"]
  capabilities = ["pull", "resolve"]

[host."http://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF

# registry.k8s.io mirror
mkdir -p /etc/containerd/certs.d/registry.k8s.io
tee /etc/containerd/certs.d/registry.k8s.io/hosts.toml << 'EOF'
server = "https://registry.k8s.io"

[host."https://k8s.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# docker.elastic.co mirror
mkdir -p /etc/containerd/certs.d/docker.elastic.co
tee /etc/containerd/certs.d/docker.elastic.co/hosts.toml << 'EOF'
server = "https://docker.elastic.co"

[host."https://elastic.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# gcr.io mirror
mkdir -p /etc/containerd/certs.d/gcr.io
tee /etc/containerd/certs.d/gcr.io/hosts.toml << 'EOF'
server = "https://gcr.io"

[host."https://gcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# ghcr.io mirror
mkdir -p /etc/containerd/certs.d/ghcr.io
tee /etc/containerd/certs.d/ghcr.io/hosts.toml << 'EOF'
server = "https://ghcr.io"

[host."https://ghcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# k8s.gcr.io mirror
mkdir -p /etc/containerd/certs.d/k8s.gcr.io
tee /etc/containerd/certs.d/k8s.gcr.io/hosts.toml << 'EOF'
server = "https://k8s.gcr.io"

[host."https://k8s-gcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# mcr.microsoft.com mirror
mkdir -p /etc/containerd/certs.d/mcr.microsoft.com
tee /etc/containerd/certs.d/mcr.microsoft.com/hosts.toml << 'EOF'
server = "https://mcr.microsoft.com"

[host."https://mcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# nvcr.io mirror
mkdir -p /etc/containerd/certs.d/nvcr.io
tee /etc/containerd/certs.d/nvcr.io/hosts.toml << 'EOF'
server = "https://nvcr.io"

[host."https://nvcr.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# quay.io mirror
mkdir -p /etc/containerd/certs.d/quay.io
tee /etc/containerd/certs.d/quay.io/hosts.toml << 'EOF'
server = "https://quay.io"

[host."https://quay.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# registry.jujucharms.com mirror
mkdir -p /etc/containerd/certs.d/registry.jujucharms.com
tee /etc/containerd/certs.d/registry.jujucharms.com/hosts.toml << 'EOF'
server = "https://registry.jujucharms.com"

[host."https://jujucharms.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF

# rocks.canonical.com mirror
mkdir -p /etc/containerd/certs.d/rocks.canonical.com
tee /etc/containerd/certs.d/rocks.canonical.com/hosts.toml << 'EOF'
server = "https://rocks.canonical.com"

[host."https://rocks-canonical.m.daocloud.io"]
  capabilities = ["pull", "resolve", "push"]
EOF
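Changes to config.toml require a containerd restart, while the hosts.toml files are read on every pull. A quick way to verify the mirrors work (the image here is just an example):

systemctl restart containerd

# should succeed via one of the configured registry.k8s.io mirrors
crictl pull registry.k8s.io/pause:3.9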

nerdctl

nerdctl provides Docker-like commands for containerd on the host, which improves the user experience.

cd /tmp
wget https://github.com/containerd/nerdctl/releases/download/v1.7.6/nerdctl-1.7.6-linux-amd64.tar.gz
tar xf nerdctl-1.7.6-linux-amd64.tar.gz
mv nerdctl /usr/sbin
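Containers created by kubelet live in containerd's k8s.io namespace, so once the cluster is up, typical usage looks like:

nerdctl --namespace k8s.io images   # images pulled for pods
nerdctl --namespace k8s.io ps       # running pod containers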

Installing the components

The following instructions are for Kubernetes 1.28.

Update the apt package index and install the packages needed to use the Kubernetes apt repository.

sudo apt-get update

# apt-transport-https may be a dummy package; if so, you can skip installing it
sudo apt-get install -y apt-transport-https ca-certificates curl gpg

# Download the public signing key for the Kubernetes package repositories.
# The same signing key is used for all repositories, so you can ignore the version in the URL:
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg

# Add the Kubernetes apt repository. Note that this repository only contains packages
# for Kubernetes 1.28; for other minor versions, change the minor version in the URL
# to match (and check that the docs you are reading match the version you plan to install).
# This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list.
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list

# Update the apt package index and install kubelet, kubeadm, and kubectl:
sudo apt-get update
sudo apt-get install -y kubelet=1.28.13-1.1 kubeadm=1.28.13-1.1 kubectl=1.28.13-1.1

# Pin the versions
sudo apt-mark hold kubelet kubeadm kubectl

# Note: on releases older than Debian 12 / Ubuntu 22.04, /etc/apt/keyrings does not
# exist by default; create it with: sudo mkdir -m 755 /etc/apt/keyrings
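A quick sanity check that the pinned versions landed:

kubeadm version
kubelet --version
kubectl version --client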

Cluster initialization

Generate the config file: kubeadm config print init-defaults > kubeadm.yaml

[root@master k8s]# cat kubeadm.yaml 
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
#localAPIEndpoint:
#  advertiseAddress: 192.168.2.232
#  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
#  name: node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
# Use the Aliyun image repository and the matching k8s version
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.13
# Added: stable control-plane endpoint
controlPlaneEndpoint: 192.168.2.198:6443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.254.0.0/16
  podSubnet: 10.255.0.0/16  # pod CIDR
scheduler: {}
# Appended: kubelet and kube-proxy settings
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

Verify that the image repository configuration takes effect.

kubeadm config images list --config=kubeadm.yaml

Pull the images ahead of time.

kubeadm config images pull --config=kubeadm.yaml

Check that the images were downloaded.

crictl images

Start the initialization.

kubeadm init --config=kubeadm.yaml

When it finishes, the output includes the commands for joining nodes to the cluster.

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.2.198:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:91a2398cbadf3967950dc6900e7411d5319e82ad30e139a1163896f9a8c61234 \
        --control-plane 

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.2.198:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:91a2398cbadf3967950dc6900e7411d5319e82ad30e139a1163896f9a8c61234
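Before using kubectl on the first master, set up the kubeconfig (this is the standard step from the init output):

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config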

Join the other master nodes to the cluster.

Create the certificate directory on the new control-plane nodes, copy the CA material over from the first master, then run the join command.

# On each new control-plane node (192.168.2.233 / 192.168.2.234):
mkdir -p /etc/kubernetes/pki/etcd/

# On the first master, copy the shared certificates over:
scp /etc/kubernetes/pki/ca.* 192.168.2.233:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.* 192.168.2.234:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* 192.168.2.233:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.* 192.168.2.234:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* 192.168.2.233:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.* 192.168.2.234:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.* 192.168.2.233:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.* 192.168.2.234:/etc/kubernetes/pki/etcd/

# On each new control-plane node:
kubeadm join 192.168.2.198:6443 --token abcdef.0123456789abcdef \
      --discovery-token-ca-cert-hash sha256:91a2398cbadf3967950dc6900e7411d5319e82ad30e139a1163896f9a8c61234 \
      --control-plane

Right after deployment the nodes are still NotReady; we need to install a network plugin to finish the job.
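A quick look:

kubectl get nodes   # STATUS stays NotReady until a CNI plugin is deployed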

calico

We use Calico as the cluster's network plugin. The official site offers two installation methods: the operator, which is newer but not fully documented, and the classic YAML manifest. We'll be conservative and go with the manifest.

curl https://raw.githubusercontent.com/projectcalico/calico/v3.28.1/manifests/calico.yaml -O

In calico.yaml, set CALICO_IPV4POOL_CIDR to our pod CIDR and set CALICO_IPV4POOL_IPIP to Always to enable the IPIP protocol.
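A sketch of the edit and rollout, assuming the stock v3.28.1 manifest (both variables live in the calico-node container's env, with CALICO_IPV4POOL_CIDR commented out by default; the CIDR below matches the podSubnet configured earlier):

# In calico.yaml, the env entries should end up as:
#   - name: CALICO_IPV4POOL_CIDR
#     value: "10.255.0.0/16"
#   - name: CALICO_IPV4POOL_IPIP
#     value: "Always"

kubectl apply -f calico.yaml

# watch the calico pods come up; the nodes flip to Ready afterwards
kubectl get pods -n kube-system -w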

Verify that CoreDNS resolution and forwarding work.

apt install -y dnsutils

kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.254.0.10   <none>        53/UDP,53/TCP,9153/TCP   15h

dig -t a www.baidu.com @10.254.0.10

Create a deployment to verify that the cluster functions properly.

kubectl create deployment nginx-app --image=nginx --replicas 2

kubectl expose deployment nginx-app --name=nginx-web-svc --type NodePort --port 80 --target-port 80
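Then confirm the service answers (a sketch; substitute a node IP and the NodePort that was assigned):

kubectl get svc nginx-web-svc        # note the mapped NodePort, e.g. 80:3xxxx/TCP
curl http://<node-ip>:<node-port>    # expect the nginx welcome page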