
[Cloud+ Community Annual Essay] Kubernetes v1.19.0 High-Availability Installation and Deployment

1 System Preparation

1.1 Environment

All servers are Tencent Cloud CVM instances; the VPC, subnets, and related network resources should be created in advance.

1.2 Hardware

hostname       cpu (cores)   mem (GB)   disk   ip
k8s-master01   4             4          20GB   172.20.5.11
k8s-master02   4             4          20GB   172.20.5.12
k8s-master03   4             4          20GB   172.20.5.13
k8s-lb         4             4          20GB   172.20.5.10
k8s-node01     4             4          20GB   172.20.2.11

1.3 Software

CentOS Linux release 7.3.1611 (Core)

Kubernetes v1.19.0

Docker 19.03.12

1.4 Installation Workflow

  • Server environment configuration
    • Configure hostnames, firewall, SELinux, swap, passwordless SSH, hosts entries, etc. on all servers to prepare for the steps that follow.
  • Install Docker
    • Docker provides the container runtime for Kubernetes.
  • Install Kubernetes
    • Kubernetes is an open-source container orchestration engine from Google that supports automated deployment, large-scale scalability, and containerized application management.
  • Install haproxy and keepalived
    • These provide high availability and load balancing for the API server.
  • Pull the required images
  • Initialize master01
  • Join master02 and master03 to the cluster
  • Join node01 to the cluster
  • Test high availability

1.5 Set Hostnames

Run on all nodes

Set each node's hostname to the value in the hostname column of section 1.2; the example below is for k8s-master01.

hostnamectl set-hostname k8s-master01
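
The workflow in section 1.4 also mentions hosts entries, which the later steps (ssh-copy-id, the controlPlaneEndpoint "k8s-lb:16443") rely on for name resolution. A minimal sketch, run on all nodes, assuming resolution is done via /etc/hosts and that k8s-lb points at the keepalived VIP configured in section 5.1:

cat >> /etc/hosts <<EOF
172.20.5.11 k8s-master01
172.20.5.12 k8s-master02
172.20.5.13 k8s-master03
172.20.5.10 k8s-lb
172.20.2.11 k8s-node01
EOF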

1.6 Disable the Firewall

Run on all nodes

systemctl disable --now firewalld

1.7 Disable Swap

Run on all nodes

swapoff -a # disable immediately
sed -i 's/.*swap.*/#&/' /etc/fstab # disable permanently

1.8 Disable SELinux

Run on all nodes

setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

1.9 Set the Time Zone and Synchronize Time

Run on all nodes

timedatectl set-timezone Asia/Shanghai
systemctl enable --now chronyd

# keep the hardware clock in UTC
timedatectl set-local-rtc 0
# restart services that depend on the system time
systemctl restart rsyslog && systemctl restart crond

1.10 Configure Kernel Parameters

Run on all nodes

cat >> /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

# load settings from all sysctl configuration files, including /etc/sysctl.d/k8s.conf
sysctl --system
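
The two bridge-nf-call settings only take effect while the br_netfilter module is loaded, which the steps above do not do explicitly; a minimal sketch to load it now and after reboots (run on all nodes):

modprobe br_netfilter
cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF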

1.11 Upgrade the Kernel

Run on all nodes, then reboot so the new kernel takes effect

yum update kernel systemd -y

1.12 Configure Passwordless SSH

Run on k8s-master01

ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master02
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master03
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-node01

2 Deploy Docker

2.1 Add the Docker Yum Repository

Run on all nodes

# install required dependencies
yum install -y yum-utils device-mapper-persistent-data lvm2
# add the aliyun docker-ce yum repository
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# rebuild the yum cache
yum makecache fast
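
To confirm that the version pinned below is actually available from the repository, an optional check:

yum list docker-ce --showduplicates | sort -r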

2.2 Install a Specific Docker Version

Run on all nodes

yum install -y docker-ce-19.03.12-3.el7

2.3 Configure Docker

Run on all nodes

mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://su9ppkb0.mirror.aliyuncs.com"]
}
EOF
# enable at boot and start immediately
systemctl enable --now docker
systemctl daemon-reload
systemctl restart docker
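
kubeadm init later warns that Docker is using the cgroupfs cgroup driver (see the output in section 5.2). If you want to avoid that warning, an optional, hedged variant of daemon.json switches Docker to the systemd driver; apply it before initializing the cluster so kubelet picks up the matching driver:

cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://su9ppkb0.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker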

3 Install IPVS

3.1 Install Packages

Run on all nodes

yum install ipvsadm ipset sysstat conntrack libseccomp -y

3.2 Load Kernel Modules

Run on all nodes

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- xt_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
EOF

3.3 Load the Modules Automatically on Boot

Run on all nodes

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

4 Deploy Kubernetes

4.1 Add the Kubernetes Yum Repository

Run on all nodes

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# rebuild the yum cache; enter y when prompted to accept the repository keys
yum makecache fast

4.2 Check Available Versions

yum list |grep kubelet
yum list |grep kubeadm
yum list |grep kubectl

4.3 Install kubeadm, kubelet and kubectl

Run on all master nodes

yum install -y kubelet-1.19.0-0 --disableexcludes=kubernetes
yum install -y kubeadm-1.19.0-0 --disableexcludes=kubernetes
yum install -y kubectl-1.19.0-0 --disableexcludes=kubernetes
# enable kubelet to start at boot
systemctl enable --now kubelet
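
An optional check that the pinned versions were installed as expected:

kubeadm version -o short
kubelet --version
kubectl version --client --short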

4.4 Configure Command Completion

Run on all master nodes

# install the bash-completion package
yum install bash-completion -y

4.5 Set up kubectl and kubeadm Completion (takes effect at the next login)

Run on k8s-master01 through k8s-master03

kubectl completion bash >/etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm

5 Cluster Initialization

5.1 Configure Cluster High Availability

High availability uses HAProxy + Keepalived to provide a highly available endpoint and load balancing for the master nodes. HAProxy and Keepalived run as daemons on all master nodes.

Install the packages (all master nodes)

yum install keepalived haproxy -y

Configure HAProxy (/etc/haproxy/haproxy.cfg)

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kubernetes
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
# adjust these to your own master node IP addresses
    server  k8s-master01 172.20.5.11:6443 check
    server  k8s-master02 172.20.5.12:6443 check
    server  k8s-master03 172.20.5.13:6443 check

#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:9999
    stats auth           admin:P@ssW0rd
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats

Copy the configuration file to k8s-master02 and k8s-master03

scp /etc/haproxy/haproxy.cfg root@k8s-master02:/etc/haproxy
scp /etc/haproxy/haproxy.cfg root@k8s-master03:/etc/haproxy
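
As an optional sanity check, the configuration syntax can be validated on each master before starting the service:

haproxy -c -f /etc/haproxy/haproxy.cfg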

Configure Keepalived

Configuration on k8s-master01

vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
   172.20.5.10
    }

    # invoke the health-check script
    #track_script {
    #    check_apiserver
    #}
}

Configuration on k8s-master02

! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
   172.20.5.10
    }

    # invoke the health-check script
    #track_script {
    #    check_apiserver
    #}
}

Configuration on k8s-master03

! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
     failover@firewall.loc
     sysadmin@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# health-check script definition
vrrp_script check_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 98
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
   172.20.5.10
    }

    # invoke the health-check script
    #track_script {
    #    check_apiserver
    #}
}

Write the health-check script

Run on all master nodes

vim /etc/keepalived/check_apiserver.sh
#!/bin/bash

function check_apiserver(){
 for ((i=0;i<5;i++))
 do
  apiserver_job_id=$(pgrep kube-apiserver)
  if [[ ! -z ${apiserver_job_id} ]];then
   return
  else
   sleep 2
  fi
  apiserver_job_id=0
 done
}

# 1->running    0->stopped
check_apiserver
if [[ $apiserver_job_id -eq 0 ]];then
 /usr/bin/systemctl stop keepalived
 exit 1
else
 exit 0
fi
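
keepalived must be able to execute the script referenced by vrrp_script, so make it executable (run on all master nodes):

chmod +x /etc/keepalived/check_apiserver.sh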

Enable and start haproxy and keepalived

Run on all master nodes

systemctl enable --now keepalived
systemctl enable --now haproxy
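
An optional check that the VIP and the HAProxy frontend are up (on the node currently holding the VIP, k8s-master01 by priority):

ip addr show eth0 | grep 172.20.5.10
ss -lnt | grep 16443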

5.2 Initialize k8s-master01

1 Write the kubeadm configuration file

Run on all nodes

cat > kubeadm.yaml <<EOF
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "k8s-lb:16443"
networking:
  dnsDomain: cluster.local
  podSubnet: 192.168.0.0/16
  serviceSubnet: 10.211.0.0/12
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
EOF

2 Pull the Images

Run on all nodes

kubeadm config images pull --config kubeadm.yaml

3 Initialize the Cluster

Run on k8s-master01

kubeadm init --config kubeadm.yaml --upload-certs
[init] Using Kubernetes version: v1.19.0
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc
...
...
...
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join k8s-lb:16443 --token n7ipit.4dkqd9uxa0b9d153 \
    --discovery-token-ca-cert-hash sha256:41a1353a03c99f46868294c28f9948bbc2cca957d98eb010435a493112ec7caa \
    --control-plane --certificate-key 6ce0872da76396c30c430a0d4e629bee46a508890c29d0f86d7982380c621889

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join k8s-lb:16443 --token n7ipit.4dkqd9uxa0b9d153 \
    --discovery-token-ca-cert-hash sha256:41a1353a03c99f46868294c28f9948bbc2cca957d98eb010435a493112ec7caa 

Configure the environment variable

cat >> /root/.bashrc <<EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /root/.bashrc

5.3 Install the Network Plugin (Calico)

wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
kubectl apply -f calico.yaml
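
An optional way to watch the Calico and CoreDNS pods start and confirm the node becomes Ready:

kubectl get pods -n kube-system -w   # Ctrl-C to stop watching
kubectl get nodes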

5.4 Check Cluster Status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   19m   v1.19.0

6 Join Nodes to the Cluster

6.1 Join the Master Nodes

Make sure the nodes to be joined have completed the initial configuration, have docker, kubeadm, kubectl and kubelet installed, and have already pulled the required images.

Run on k8s-master02 and k8s-master03

kubeadm join k8s-lb:16443 --token n7ipit.4dkqd9uxa0b9d153 \
    --discovery-token-ca-cert-hash sha256:41a1353a03c99f46868294c28f9948bbc2cca957d98eb010435a493112ec7caa \
    --control-plane --certificate-key 6ce0872da76396c30c430a0d4e629bee46a508890c29d0f86d7982380c621889
...
This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

 mkdir -p $HOME/.kube
 sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
...

Check the cluster status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   19h   v1.19.0
k8s-master02   Ready    master   19h   v1.19.0
k8s-master03   Ready    master   19h   v1.19.0

6.2 Join the Worker Node

Run on k8s-node01

kubeadm join k8s-lb:16443 --token n7ipit.4dkqd9uxa0b9d153 \
    --discovery-token-ca-cert-hash sha256:41a1353a03c99f46868294c28f9948bbc2cca957d98eb010435a493112ec7caa 

Check the cluster nodes

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    master   19h   v1.19.0
k8s-master02   Ready    master   19h   v1.19.0
k8s-master03   Ready    master   19h   v1.19.0
k8s-node01     Ready    worker   17h   v1.19.0

7 Test Cluster High Availability

7.1 Check Which Node Currently Holds the VIP

[root@k8s-master01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether fa:82:dc:70:b8:00 brd ff:ff:ff:ff:ff:ff
    inet 172.20.5.11/24 brd 172.20.5.255 scope global eth0
       valid_lft forever preferred_lft forever
    # the virtual IP is currently on k8s-master01
    inet 172.20.5.10/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5cf6:fe52:d77c:a6c6/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::6e1c:9620:3254:e840/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::903f:b002:5039:f925/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever

7.2 Shut Down k8s-master01

[root@k8s-master01 ~]# poweroff

7.3 Verify the VIP Failover and Cluster Functionality

After k8s-master01 is powered off, keepalived detects the failure through its health checks and automatically moves the virtual IP 172.20.5.10 to k8s-master02:

[root@k8s-master02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether fa:f6:59:0d:a6:00 brd ff:ff:ff:ff:ff:ff
    inet 172.20.5.12/24 brd 172.20.5.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 172.20.5.10/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5cf6:fe52:d77c:a6c6/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::6e1c:9620:3254:e840/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever
    inet6 fe80::903f:b002:5039:f925/64 scope link tentative dadfailed 
       valid_lft forever preferred_lft forever

Test cluster functionality

[root@k8s-master02 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   NotReady   master   19h   v1.19.0
k8s-master02   Ready      master   19h   v1.19.0
k8s-master03   Ready      master   19h   v1.19.0
k8s-node01     Ready      worker   17h   v1.19.0
[root@k8s-master02 ~]# kubectl get po -n kube-system -o wide
NAME                                      READY   STATUS    RESTARTS   AGE   IP                NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-75d555c48-6h26g   1/1     Running   0          18h   192.168.195.3     k8s-master03   <none>           <none>
calico-node-dknrd                         1/1     Running   1          19h   172.20.5.11       k8s-master01   <none>           <none>
calico-node-klpd8                         1/1     Running   0          19h   172.20.5.13       k8s-master03   <none>           <none>
calico-node-sqps2                         1/1     Running   0          17h   172.20.2.11       k8s-node01     <none>           <none>
calico-node-w4nh9                         1/1     Running   0          19h   172.20.5.12       k8s-master02   <none>           <none>
coredns-546565776c-fhvc8                  1/1     Running   0          19h   192.168.122.131   k8s-master02   <none>           <none>
coredns-546565776c-kf7sm                  1/1     Running   0          19h   192.168.122.129   k8s-master02   <none>           <none>
etcd-k8s-master01                         1/1     Running   1          19h   172.20.5.11       k8s-master01   <none>           <none>
etcd-k8s-master02                         1/1     Running   0          19h   172.20.5.12       k8s-master02   <none>           <none>
etcd-k8s-master03                         1/1     Running   0          19h   172.20.5.13       k8s-master03   <none>           <none>
kube-apiserver-k8s-master01               1/1     Running   1          19h   172.20.5.11       k8s-master01   <none>           <none>
kube-apiserver-k8s-master02               1/1     Running   0          19h   172.20.5.12       k8s-master02   <none>           <none>
kube-apiserver-k8s-master03               1/1     Running   2          19h   172.20.5.13       k8s-master03   <none>           <none>
kube-controller-manager-k8s-master01      1/1     Running   4          19h   172.20.5.11       k8s-master01   <none>           <none>
kube-controller-manager-k8s-master02      1/1     Running   1          19h   172.20.5.12       k8s-master02   <none>           <none>
kube-controller-manager-k8s-master03      1/1     Running   0          19h   172.20.5.13       k8s-master03   <none>           <none>
kube-proxy-cjm2b                          1/1     Running   1          17h   172.20.2.11       k8s-node01     <none>           <none>
kube-proxy-d7hs9                          1/1     Running   1          19h   172.20.5.11       k8s-master01   <none>           <none>
kube-proxy-s57dl                          1/1     Running   0          19h   172.20.5.13       k8s-master03   <none>           <none>
kube-proxy-z8bfl                          1/1     Running   0          19h   172.20.5.12       k8s-master02   <none>           <none>
kube-scheduler-k8s-master01               1/1     Running   3          19h   172.20.5.11       k8s-master01   <none>           <none>
kube-scheduler-k8s-master02               1/1     Running   0          19h   172.20.5.12       k8s-master02   <none>           <none>
kube-scheduler-k8s-master03               1/1     Running   0          19h   172.20.5.13       k8s-master03   <none>           <none>

8 Troubleshooting

8.1 Failed to Create the Registry Container

OCI runtime create failed: container_linux.go:348: starting container process caused "process_linux.go:297: copying bootstrap data to pipe caused \"write init-p: broken pipe\"": unknown

Solution: the kernel version was too old. The image registry was built on CentOS 7.3 with kernel 3.10.0-327.el7.x86_64; upgrading to 3.10.0-957.21.3.el7.x86_64 resolved the issue.
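
An optional way to check the running kernel and upgrade it on an affected node:

uname -r                       # current kernel version
yum update kernel -y && reboot # boot into the newly installed kernel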

8.2 Node Stays Unhealthy After kubeadm join and kubelet Keeps Restarting

kubelet: Failed to start ContainerManager Cannot set property TasksAccounting, or unknown property

Solution: upgrade the systemd package. The problematic node was CentOS 7.3 with systemd-219-30; upgrading to systemd-219-62 resolved the issue.
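
An optional check and upgrade of systemd on the affected node:

rpm -q systemd        # current systemd version
yum update systemd -y
systemctl daemon-reexec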

9 References

https://mp.weixin.qq.com/s/S01dVNKKg4E41wdKxHDiZQ
