在master节点执行命令
# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
将之前生成的kubeconfig文件以及pem证书拷贝到两台Node节点上。
# Distribute the previously generated kubeconfig files and TLS certificates
# from the master to both node hosts (run on the master).
cd /opt/kubernetes/ssl
scp *.kubeconfig root@host1:/opt/kubernetes/cfg/
scp *.kubeconfig root@host2:/opt/kubernetes/cfg/
scp *.pem root@host1:/opt/kubernetes/ssl/
scp *.pem root@host2:/opt/kubernetes/ssl/
将kubernetes-server-linux-amd64.tar.gz上传到两台node节点上,解压(Node节点执行以下操作)
# Unpack the server tarball and install the node components (run on each node).
tar -xzvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
# Only kubelet and kube-proxy are needed on worker nodes.
mv kubelet kube-proxy /opt/kubernetes/bin/
拉取谷歌容器的阿里云镜像,并标记
# Pull the pause infra image from the Aliyun mirror of gcr.io (gcr.io is
# unreachable from many networks) and give it a local tag.
docker pull registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
docker tag registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 pause-amd64:3.0
创建kubelet.sh脚本
# Create the kubelet bootstrap script and open it for editing.
touch kubelet.sh
vim kubelet.sh
内容如下
#!/bin/bash
# kubelet.sh — generate the kubelet options file and systemd unit, then
# enable and (re)start the kubelet service.
#
# Usage: ./kubelet.sh <node-ip> <cluster-dns-ip>
#   $1 - IP address this kubelet binds to / registers as (default 172.18.98.47)
#   $2 - cluster DNS service IP handed to pods (default 10.10.10.2)
NODE_ADDRESS=${1:-"172.18.98.47"}
DNS_SERVER_IP=${2:-"10.10.10.2"}

# Options file consumed by the systemd unit via EnvironmentFile.
# The heredoc is unquoted so ${NODE_ADDRESS}/${DNS_SERVER_IP} expand now,
# while \\ writes a literal backslash line-continuation into the file.
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--allow-privileged=true \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

# systemd unit. \$KUBELET_OPTS is escaped so the literal string, not an
# (empty) expansion, ends up in the unit file.
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=kubernetes kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
保存退出
# Make the script executable.
chmod 755 kubelet.sh
在host1上执行
# ./kubelet.sh 172.18.98.46 10.10.10.2
在host2上执行
# ./kubelet.sh 172.18.98.47 10.10.10.2
此时在/opt/kubernetes/cfg目录下生成了kubelet文件,在/usr/lib/systemd/system目录下生成了kubelet.service文件
查看kubelet文件
# cat /opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--address=172.18.98.47 \
--hostname-override=172.18.98.47 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--cert-dir=/opt/kubernetes/ssl \
--allow-privileged=true \
--cluster-dns=10.10.10.2 \
--cluster-domain=cluster.local \
--fail-swap-on=false \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
查看kubelet.service文件
# cat /usr/lib/systemd/system/kubelet.service
[Unit]
Description=kubernetes kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
查看进程
# ps -ef | grep kubelet
root      2579     1  0 17:05 ?        00:00:00 /opt/kubernetes/bin/kubelet --logtostderr=true --v=4 --address=172.18.98.47 --hostname-override=172.18.98.47 --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig --cert-dir=/opt/kubernetes/ssl --allow-privileged=true --cluster-dns=10.10.10.2 --cluster-domain=cluster.local --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
此时如果我们查看/opt/kubernetes/cfg目录下,kubelet.kubeconfig并没有自动生成,它是去请求master节点的证书。
创建proxy.sh文件
# Create the kube-proxy setup script and open it for editing.
touch proxy.sh
vim proxy.sh
内容如下
#!/bin/bash
# proxy.sh — generate the kube-proxy options file and systemd unit, then
# enable and (re)start the kube-proxy service.
#
# Usage: ./proxy.sh <node-ip>
#   $1 - IP address kube-proxy registers as (default 172.18.98.47)
NODE_ADDRESS=${1:-"172.18.98.47"}

# Options file consumed by the systemd unit via EnvironmentFile.
# \\ writes a literal backslash line-continuation into the generated file.
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

# systemd unit. \$KUBE_PROXY_OPTS is escaped so the literal string lands
# in the unit file instead of expanding (empty) here.
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
保存退出
# Make the script executable.
chmod 755 proxy.sh
在host1上执行
# ./proxy.sh 172.18.98.46
在host2上执行
# ./proxy.sh 172.18.98.47
此时在/opt/kubernetes/cfg目录下生成了kube-proxy文件,在/usr/lib/systemd/system目录下生成了kube-proxy.service文件
查看kube-proxy文件
# cat /opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true --v=4 --hostname-override=172.18.98.47 --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
查看kube-proxy.service文件
# cat /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
查看进程
# ps -ef | grep kube-proxy
root      7124     1  0 Jul31 ?        00:00:02 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=172.18.98.47 --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
回到master节点,查看证书请求
# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-DIASnublUtulGXj7P_z2XpevnJEp-177uMc5KQsrNVQ   21m       kubelet-bootstrap   Pending
node-csr-jQX9fAR6GyMX3ZhQDEsgtqIoCDbnUhfMo5uyPXvZDVQ   57m       kubelet-bootstrap   Pending
node-csr-naQV18GOsLRXbyMETuZdYKPMHVudpPl93-JNPMVTIVo   1h        kubelet-bootstrap   Pending
我们将这些证书请求通过
# kubectl certificate approve node-csr-jQX9fAR6GyMX3ZhQDEsgtqIoCDbnUhfMo5uyPXvZDVQ
certificatesigningrequest "node-csr-jQX9fAR6GyMX3ZhQDEsgtqIoCDbnUhfMo5uyPXvZDVQ" approved
全部通过后,再次查看证书请求,大概如下所示
# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-DIASnublUtulGXj7P_z2XpevnJEp-177uMc5KQsrNVQ   42m       kubelet-bootstrap   Approved,Issued
node-csr-e1wUzGmd2huMv0aTfH35rZtm47opoOd8pcjZjUmZdnw   20m       kubelet-bootstrap   Approved,Issued
node-csr-jQX9fAR6GyMX3ZhQDEsgtqIoCDbnUhfMo5uyPXvZDVQ   1h        kubelet-bootstrap   Approved,Issued
我们回到node节点上,查看/opt/kubernetes/ssl下面的文件
# ll
total 64
-rw------- 1 root root 1675 Feb 20 16:58 admin-key.pem
-rw-r--r-- 1 root root 1277 Feb 20 16:58 admin.pem
-rw------- 1 root root 2188 Feb 20 16:58 bootstrap.kubeconfig
-rw------- 1 root root 1679 Feb 20 16:58 ca-key.pem
-rw-r--r-- 1 root root 1359 Feb 20 16:58 ca.pem
-rwxr-xr-x 1 root root 1498 Feb 20 16:58 kubeconfig.sh
-rw-r--r-- 1 root root 1046 Feb 20 18:30 kubelet-client.crt
-rw------- 1 root root  227 Feb 20 18:02 kubelet-client.key
-rw-r--r-- 1 root root 1111 Feb 20 15:13 kubelet.crt
-rw------- 1 root root 1675 Feb 20 15:13 kubelet.key
-rw------- 1 root root 1679 Feb 20 16:58 kube-proxy-key.pem
-rw------- 1 root root 6294 Feb 20 16:58 kube-proxy.kubeconfig
-rw-r--r-- 1 root root 1403 Feb 20 16:58 kube-proxy.pem
-rw------- 1 root root 1679 Feb 20 16:58 server-key.pem
-rw-r--r-- 1 root root 1602 Feb 20 16:58 server.pem
会多出诸如kubelet-client.crt,kubelet-client.key的文件,再查看/opt/kubernetes/cfg
会发现此时已经生成了kubelet.kubeconfig
# ll
total 32
-rw------- 1 root root 2188 Feb 20 14:41 bootstrap.kubeconfig
-rw-r--r-- 1 root root  502 Feb 16 16:45 etcd
-rw-r--r-- 1 root root  248 Feb 18 15:40 flanneld
-rw-r--r-- 1 root root  477 Feb 20 17:05 kubelet
-rw------- 1 root root 2279 Feb 20 18:30 kubelet.kubeconfig
-rw-r--r-- 1 root root  133 Feb 20 18:03 kube-proxy
-rw------- 1 root root 6294 Feb 20 14:41 kube-proxy.kubeconfig
再回到master节点上,查看集群信息
# kubectl get node
NAME           STATUS    ROLES     AGE       VERSION
172.18.98.46   Ready     <none>    15m       v1.9.2
172.18.98.47   Ready     <none>    21m       v1.9.2
# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
现在master以及node全部就绪了,整个集群部署完成。