Author: 张首富
Date: 2019-06-18
Personal blog: www.zhangshoufu.com
QQ group: 895291458
Hostname | IP address | Role |
---|---|---|
K8s-master01 | 192.168.1.25 | Kubernetes master/etcd, keepalived (MASTER), HAProxy |
K8s-master02 | 192.168.1.26 | Kubernetes master/etcd, keepalived (BACKUP), HAProxy |
k8s-master03 | 192.168.1.196 | Kubernetes master/etcd |
/ | 192.168.1.16 | VIP (virtual IP) |
1) Add host entries
cat >> /etc/hosts <<EOF
192.168.1.25 k8s-master01
192.168.1.26 k8s-master02
192.168.1.196 k8s-master03
192.168.1.16 vip
EOF
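A quick sanity check (added here, not in the original steps): confirm each name resolves on every host:
for h in k8s-master01 k8s-master02 k8s-master03 vip; do
    getent hosts $h   # should print the IP recorded in /etc/hosts
done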
2) Disable SELinux and the firewall
setenforce 0
sudo sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config   # disable SELinux permanently
systemctl stop firewalld.service && systemctl disable firewalld.service    # stop and disable the firewall
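To verify (optional check): getenforce should now print Permissive (Disabled after a reboot), and firewalld should be inactive:
getenforce
systemctl is-active firewalld   # expect: inactive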
3) Set the system locale and timezone
echo 'LANG="en_US.UTF-8"' >> /etc/profile; source /etc/profile   # set the system locale
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime          # set the timezone (if needed)
4) Performance tuning
cat >> /etc/sysctl.conf <<EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.neigh.default.gc_thresh1=4096
net.ipv4.neigh.default.gc_thresh2=6144
net.ipv4.neigh.default.gc_thresh3=8192
EOF
sysctl -p
5) Configure forwarding
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sysctl --system
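Note (an addition to the original): the net.bridge.bridge-nf-call-* keys only exist once the br_netfilter kernel module is loaded, so if sysctl complains about unknown keys, load the module first and make it persistent (the standard CentOS 7 approach):
modprobe br_netfilter
echo 'br_netfilter' > /etc/modules-load.d/br_netfilter.conf   # reload the module on boot
sysctl --system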
6) Configure passwordless SSH login
On k8s-master01:
ssh-keygen -t rsa   # press Enter at every prompt
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master01
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master02
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master03
On k8s-master02:
ssh-keygen -t rsa   # press Enter at every prompt
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master01
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master02
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master03
On k8s-master03:
ssh-keygen -t rsa   # press Enter at every prompt
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master01
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master02
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master03
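Since the same three ssh-copy-id commands run on every host, a small loop is equivalent (a sketch; it assumes the key pair was already generated on the current host):
for h in k8s-master01 k8s-master02 k8s-master03; do
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@$h
done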
Install keepalived on k8s-master01:
yum -y install epel-release
yum -y install keepalived.x86_64
cat > /etc/keepalived/keepalived.conf <<-'EOF'
! Configuration File for keepalived
global_defs {
    router_id k8s-master01
}
vrrp_instance VI_1 {
    state MASTER
    interface enp2s0        # replace with this host's actual NIC name
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass zsf
    }
    virtual_ipaddress {
        192.168.1.16
    }
}
EOF
systemctl enable keepalived.service && systemctl start keepalived.service
Install HAProxy on k8s-master01:
yum -y install haproxy.x86_64
cat > /etc/haproxy/haproxy.cfg <<-'EOF'
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    log 127.0.0.1:514 local0 warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8

defaults
    log global
    mode tcp
    retries 3
    option redispatch

listen https-apiserver
    bind 192.168.1.16:8443
    mode tcp
    balance roundrobin
    timeout server 15s
    timeout connect 15s
    server apiserver01 192.168.1.25:6443 check port 6443 inter 5000 fall 5
    server apiserver02 192.168.1.26:6443 check port 6443 inter 5000 fall 5
    server apiserver03 192.168.1.196:6443 check port 6443 inter 5000 fall 5
EOF
systemctl start haproxy.service && systemctl enable haproxy.service
Install keepalived on k8s-master02:
yum -y install epel-release
yum -y install keepalived.x86_64
cat > /etc/keepalived/keepalived.conf <<-'EOF'
! Configuration File for keepalived
global_defs {
    router_id k8s-master02
}
vrrp_instance VI_1 {
    state BACKUP            # this node is the backup, per the table above
    interface enp2s0        # replace with this host's actual NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass zsf
    }
    virtual_ipaddress {
        192.168.1.16
    }
}
EOF
systemctl enable keepalived.service && systemctl start keepalived.service
Install HAProxy on k8s-master02:
yum -y install haproxy.x86_64
cat > /etc/haproxy/haproxy.cfg <<-'EOF'
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    log 127.0.0.1:514 local0 warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8

defaults
    log global
    mode tcp
    retries 3
    option redispatch

listen https-apiserver
    bind 192.168.1.16:8443
    mode tcp
    balance roundrobin
    timeout server 15s
    timeout connect 15s
    server apiserver01 192.168.1.25:6443 check port 6443 inter 5000 fall 5
    server apiserver02 192.168.1.26:6443 check port 6443 inter 5000 fall 5
    server apiserver03 192.168.1.196:6443 check port 6443 inter 5000 fall 5
EOF
systemctl start haproxy.service && systemctl enable haproxy.service
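Caveat (not covered in the original text): while k8s-master01 holds the VIP, 192.168.1.16 is not assigned on k8s-master02, so HAProxy there may fail to bind 192.168.1.16:8443 at startup. If that happens, either bind to *:8443 instead, or allow binding to non-local addresses:
echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
sysctl -p
systemctl restart haproxy.service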
Check service status:
1) Check keepalived (the VIP should be bound on k8s-master01):
[root@k8s-master01 ~]# ip a | grep "192.168.1.16"
inet 192.168.1.16/32 scope global enp2s0
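2) Check HAProxy (this check is an addition; the original only shows the keepalived check):
ss -lnt | grep 8443   # expect a LISTEN socket on 192.168.1.16:8443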
Configure the Kubernetes yum repo. Run on all three machines:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install Docker. Run on all three machines:
yum install -y yum-utils device-mapper-persistent-data lvm2
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast
yum -y install docker-ce
systemctl enable docker && systemctl start docker
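The kubelet flags added below assume the cgroupfs cgroup driver, so it is worth confirming Docker reports the same driver (a quick optional check on a default docker-ce install):
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: cgroupfs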
Install the Kubernetes components. Run on all three machines:
yum -y install kubectl-1.14.0
yum -y install kubelet-1.14.0
yum -y install kubeadm-1.14.0
systemctl enable kubelet && systemctl start kubelet
echo 'Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs --runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice"' >> /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
echo 'Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --fail-swap-on=false"' >> /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
cat > /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://tj7mo5wf.mirror.aliyuncs.com"]
}
EOF
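After editing the kubelet drop-in and /etc/docker/daemon.json, reload systemd and restart both services so the changes take effect (this step is implied but not spelled out above):
systemctl daemon-reload
systemctl restart docker kubelet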
swapoff -a && sed -ri 's/.*-swap/#&/' /etc/fstab   # disable swap now and comment it out of fstab
cat > kubeadm-config.yaml <<-'EOF'
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
controlPlaneEndpoint: 192.168.1.16:8443
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
networking:
  podSubnet: 10.10.0.0/16
EOF
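Optionally, preview exactly which images kubeadm will use before pulling:
kubeadm config images list --config kubeadm-config.yaml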
kubeadm config images pull --config kubeadm-config.yaml   # pull the required images first
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs
A successful init prints output like the following:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.1.16:8443 --token o3444m.kt32joh143khrgga \
--discovery-token-ca-cert-hash sha256:fdff2f2a155fd3c0bcbde02cf9b5cf48ca95f9dfdf7a2b8f492a3b36119edf2a \
--experimental-control-plane --certificate-key 52dcb9e043e802555d3f758e09cf7beb2c4e80628e6132f30b3a4ae5246ca9d1
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --experimental-upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.16:8443 --token o3444m.kt32joh143khrgga \
--discovery-token-ca-cert-hash sha256:fdff2f2a155fd3c0bcbde02cf9b5cf48ca95f9dfdf7a2b8f492a3b36119edf2a
As the output instructs, set up the kubeconfig in the home directory of the user that will run kubectl:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master01 NotReady master 16m v1.14.0 192.168.1.25 <none> CentOS Linux 7 (Core) 3.10.0-862.el7.x86_64 docker://18.9.6
At this point one node is registered, and its status is "NotReady".
# kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system coredns-d5947d4b-h4wcv 0/1 Pending 0 14m <none> <none> <none> <none>
kube-system coredns-d5947d4b-mr86q 0/1 Pending 0 14m <none> <none> <none> <none>
kube-system etcd-k8s-master01 1/1 Running 0 13m 192.168.1.25 k8s-master01 <none> <none>
kube-system kube-apiserver-k8s-master01 1/1 Running 0 14m 192.168.1.25 k8s-master01 <none> <none>
kube-system kube-controller-manager-k8s-master01 1/1 Running 0 14m 192.168.1.25 k8s-master01 <none> <none>
kube-system kube-proxy-d84dh 1/1 Running 0 14m 192.168.1.25 k8s-master01 <none> <none>
kube-system kube-scheduler-k8s-master01 1/1 Running 0 13m 192.168.1.25 k8s-master01 <none> <none>
Because no network plugin is installed yet, CoreDNS stays in Pending.
Join the remaining masters using the k8s v1.14.0 certificate-upload feature (run on k8s-master02 and k8s-master03):
kubeadm join 192.168.1.16:8443 --token o3444m.kt32joh143khrgga \
--discovery-token-ca-cert-hash sha256:fdff2f2a155fd3c0bcbde02cf9b5cf48ca95f9dfdf7a2b8f492a3b36119edf2a \
--experimental-control-plane --certificate-key 52dcb9e043e802555d3f758e09cf7beb2c4e80628e6132f30b3a4ae5246ca9d1
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
k8s-master01 NotReady master 7m33s v1.14.0 192.168.1.25 <none> CentOS Linux 7 (Core) 3.10.0-862.el7.x86_64 docker://18.9.6
k8s-master02 NotReady master 4m28s v1.14.0 192.168.1.26 <none> CentOS Linux 7 (Core) 3.10.0-862.el7.x86_64 docker://18.9.6
k8s-master03 NotReady master 5m27s v1.14.0 192.168.1.196 <none> CentOS Linux 7 (Core) 3.10.0-862.el7.x86_64 docker://18.9.6
All nodes are in the NotReady state because no network plugin has been installed. Install the flannel network plugin:
kubectl apply -f http://tools.zhangshoufu.com/tools/k8s/kube-flannel.yaml
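Note (an addition to the original): the flannel manifest's net-conf.json must use the same Network as podSubnet in kubeadm-config.yaml. The stock kube-flannel.yml defaults to 10.244.0.0/16, so for this cluster the ConfigMap should read roughly as below; the YAML hosted at the URL above presumably already matches, but verify before applying:
net-conf.json: |
  {
    "Network": "10.10.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }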
Check whether the installation succeeded:
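For example (commands added for completeness; pod names will differ):
kubectl get pods -n kube-system -o wide   # the flannel and coredns pods should reach Running
kubectl get nodes                         # all three masters should turn Ready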
At this point the kubeadm installation of the highly available masters is complete.
Notes:
1. flannel is running in VXLAN mode; change it yourself if you need a different backend.
2. keepalived really should have a health-check script (see the sketch below).
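A minimal sketch of such a health check, assuming the goal is to surrender the VIP when HAProxy dies (the script name and weight are illustrative, not from the original):
vrrp_script check_haproxy {
    script "/usr/bin/pgrep haproxy"   # non-zero exit marks the check as failed
    interval 2
    weight -60                        # on failure, drop priority below the backup's
}
vrrp_instance VI_1 {
    # ...existing settings as above, plus:
    track_script {
        check_haproxy
    }
}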
Original-content statement: this article was published on the Tencent Cloud Developer Community with the author's authorization and may not be reproduced without permission.
For infringement concerns, contact cloudcommunity@tencent.com for removal.