Three basic methods for managing core K8S resources
[root@zdd211-21 ~]# kubectl get namespace
NAME STATUS AGE
default Active 16h
kube-node-lease Active 16h
kube-public Active 16h
kube-system Active 16h
[root@zdd211-21 ~]# kubectl get all -n default
NAME READY STATUS RESTARTS AGE
pod/nginx-ds-8r8sc 1/1 Running 0 128m
pod/nginx-ds-pdznf 1/1 Running 0 128m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 16h
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
daemonset.apps/nginx-ds 2 2 2 2 2 <none> 128m
[root@zdd211-21 ~]# kubectl create ns app
namespace/app created
[root@zdd211-21 ~]# kubectl get ns
NAME STATUS AGE
app Active 12s
default Active 16h
kube-node-lease Active 16h
kube-public Active 16h
kube-system Active 16h
[root@zdd211-21 ~]# kubectl delete namespace app
namespace "app" deleted
[root@zdd211-21 ~]# kubectl get ns
NAME STATUS AGE
default Active 16h
kube-node-lease Active 16h
kube-public Active 16h
kube-system Active 16h
Create a deployment named nginx-dp in the kube-public namespace, using the nginx image:
[root@zdd211-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@zdd211-21 ~]# kubectl get deployment -n kube-public
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-dp 1/1 1 1 48s
Quick look:
[root@zdd211-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-2m8hc 1/1 Running 0 2m24s
Extended view:
[root@zdd211-21 ~]# kubectl get pods -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-2m8hc 1/1 Running 0 3m52s 172.7.21.3 zdd211-21.host.com <none> <none>
Detailed view:
[root@zdd211-21 ~]# kubectl describe deployment nginx-dp -n kube-public
Name: nginx-dp
Namespace: kube-public
CreationTimestamp: Sun, 01 Mar 2020 17:41:06 +0800
Labels: app=nginx-dp
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx-dp
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
................
[root@zdd211-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-2m8hc 1/1 Running 0 15m
[root@zdd211-21 ~]# kubectl exec -it nginx-dp-5dfc689474-2m8hc -n kube-public bash
root@nginx-dp-5dfc689474-2m8hc:/# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
8: eth0@if9: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
link/ether 02:42:ac:07:15:03 brd ff:ff:ff:ff:ff:ff
inet 172.7.21.3/24 brd 172.7.21.255 scope global eth0
valid_lft forever preferred_lft forever
root@nginx-dp-5dfc689474-2m8hc:/# hostname
nginx-dp-5dfc689474-2m8hc
Deleting the pod resource is effectively a pod restart: the controller pulls a new one up right away
[root@zdd211-21 ~]# kubectl delete pods nginx-dp-5dfc689474-2m8hc -n kube-public
pod "nginx-dp-5dfc689474-2m8hc" deleted
[root@zdd211-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-qk5j2 1/1 Running 0 7s
Force delete:
[root@zdd211-21 ~]# kubectl delete pods nginx-dp-5dfc689474-qk5j2 -n kube-public --force --grace-period=0
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "nginx-dp-5dfc689474-qk5j2" force deleted
[root@zdd211-21 ~]# kubectl delete deployment nginx-dp -n kube-public
deployment.extensions "nginx-dp" deleted
[root@zdd211-21 ~]# kubectl get pods -n kube-public
No resources found.
[root@zdd211-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 -n kube-public
deployment.apps/nginx-dp created
[root@zdd211-21 ~]# kubectl get pod -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-s8cfw 1/1 Running 0 20s
[root@zdd211-21 ~]# kubectl expose deployment nginx-dp --port=80 -n kube-public
service/nginx-dp exposed
[root@zdd211-21 ~]# kubectl get svc -n kube-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-dp ClusterIP 192.168.197.188 <none> 80/TCP 62s
[root@zdd211-21 ~]# kubectl get pod,svc -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/nginx-dp-5dfc689474-s8cfw 1/1 Running 0 3m7s 172.7.22.3 zdd211-22.host.com <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/nginx-dp ClusterIP 192.168.197.188 <none> 80/TCP 97s app=nginx-dp
[root@zdd211-21 ~]# kubectl describe svc nginx-dp -n kube-public
Name: nginx-dp
Namespace: kube-public
Labels: app=nginx-dp
Annotations: <none>
Selector: app=nginx-dp
Type: ClusterIP
IP: 192.168.197.188
Port: <unset> 80/TCP
TargetPort: 80/TCP
Endpoints: 172.7.21.3:80,172.7.22.3:80
Session Affinity: None
Events: <none>
No matter which node a pod lands on after being deleted and recreated, the cluster IP exposed by the Service never changes.
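A quick way to see this is to list the Service together with its Endpoints before and after a pod is recreated: the endpoint IPs change, but CLUSTER-IP stays the same. A suggested check, reusing the objects created above:
[root@zdd211-21 ~]# kubectl get svc,endpoints nginx-dp -n kube-public
[root@zdd211-21 ~]# kubectl delete pods -l app=nginx-dp -n kube-public
[root@zdd211-21 ~]# kubectl get svc,endpoints nginx-dp -n kube-public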
[root@zdd211-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.211.55.21:6443 Masq 1 0 0
-> 10.211.55.22:6443 Masq 1 0 0
TCP 192.168.197.188:80 nq
-> 172.7.22.3:80 Masq 1 0 0
[root@zdd211-21 ~]# kubectl scale deployment nginx-dp --replicas=2 -n kube-public
deployment.extensions/nginx-dp scaled
[root@zdd211-21 ~]#
[root@zdd211-21 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.211.55.21:6443 Masq 1 0 0
-> 10.211.55.22:6443 Masq 1 0 0
TCP 192.168.197.188:80 nq
-> 172.7.21.3:80 Masq 1 0 0
-> 172.7.22.3:80 Masq 1 0 0
[root@zdd211-21 ~]# curl 192.168.197.188
kubectl is the official CLI tool. It talks to the apiserver, organizing and translating the commands you type on the command line into requests the apiserver can understand, and is therefore an effective way to manage all kinds of K8S resources.
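As a quick check of that communication path, kubectl cluster-info prints the apiserver endpoint the current kubeconfig points at (optional):
[root@zdd211-21 ~]# kubectl cluster-info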
[root@zdd211-21 ~]# kubectl --help
kubectl controls the Kubernetes cluster manager.
Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/
Basic Commands (Beginner):
create Create a resource from a file or from stdin.
expose Take a replication controller, service, deployment or pod and expose it as a new
Kubernetes Service
run Run a particular image on the cluster
set Set specific features on objects
Basic Commands (Intermediate):
explain Documentation of resources
get Display one or many resources
edit Edit a resource on the server
delete Delete resources by filenames, stdin, resources and names, or by resources and
label selector
Deploy Commands:
rollout Manage the rollout of a resource
scale Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job
autoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController
[root@zdd211-21 ~]# kubectl get pods -n kube-public
NAME READY STATUS RESTARTS AGE
nginx-dp-5dfc689474-gwghg 1/1 Running 0 13m
nginx-dp-5dfc689474-s8cfw 1/1 Running 0 19m
[root@zdd211-21 ~]# kubectl get pods nginx-dp-5dfc689474-gwghg -o yaml -n kube-public
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2020-03-01T10:17:20Z"
  generateName: nginx-dp-5dfc689474-
  labels:
    app: nginx-dp
    pod-template-hash: 5dfc689474
  name: nginx-dp-5dfc689474-gwghg
  namespace: kube-public
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: nginx-dp-5dfc689474
    uid: 7fcc908c-7f34-465e-95d3-5a5db3ef4aa6
  resourceVersion: "73706"
...............
[root@zdd211-21 ~]# kubectl explain service.metadata
KIND: Service
VERSION: v1
RESOURCE: metadata <Object>
DESCRIPTION:
Standard object's metadata. More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
ObjectMeta is metadata that all persisted resources must have, which
includes all objects users must create.
FIELDS:
annotations <map[string]string>
Annotations is an unstructured key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata. They
are not queryable and should be preserved when modifying objects. More
info: http://kubernetes.io/docs/user-guide/annotations
clusterName <string>
The name of the cluster which the object belongs to. This is used to
distinguish resources with same name and namespace in different clusters.
This field is not set anywhere right now and apiserver is going to ignore
it if set in create or update request.
.............
Every resource manifest must contain four required top-level fields: apiVersion, kind, metadata, and spec.
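If you prefer not to type the skeleton by hand, kubectl on this 1.15 cluster can generate a manifest for you; the output contains exactly those four top-level fields (a convenience step, not part of the original procedure):
[root@zdd211-21 ~]# kubectl create deployment nginx-dp --image=harbor.od.com/public/nginx:v1.7.9 --dry-run -o yaml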
[root@zdd211-21 ~]# vim nginx-ds-svn.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP
[root@zdd211-21 ~]# kubectl create -f nginx-ds-svn.yaml
service/nginx-ds created
[root@zdd211-21 ~]# kubectl get svc nginx-ds
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-ds ClusterIP 192.168.188.248 <none> 80/TCP 48s
Test changing the port
Edit nginx-ds-svn.yaml, then apply the change with kubectl apply -f nginx-ds-svn.yaml:
[root@zdd211-21 ~]# vim nginx-ds-svn.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-ds
  name: nginx-ds
  namespace: default
spec:
  ports:
  - port: 180
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-ds
  sessionAffinity: None
  type: ClusterIP
[root@zdd211-21 ~]# kubectl delete -f nginx-ds-svn.yaml
service "nginx-ds" deleted
[root@zdd211-21 ~]# kubectl apply -f nginx-ds-svn.yaml
service/nginx-ds created
[root@zdd211-21 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 17h
nginx-ds ClusterIP 192.168.118.140 <none> 180/TCP 2s
Alternatively, edit the Service in place with kubectl edit service nginx-ds:
[root@zdd211-21 ~]# kubectl edit service nginx-ds
[root@zdd211-21 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 17h
nginx-ds ClusterIP 192.168.118.140 <none> 8080/TCP 3m11s
[root@zdd211-21 ~]# kubectl delete service nginx-dp -n kube-public
service "nginx-dp" deleted
[root@zdd211-21 ~]# kubectl delete -f nginx-ds-svn.yaml
service "nginx-ds" deleted
For managing resources through a GUI, see the dashboard installation later in this document.
Hostname | Role | IP |
---|---|---|
zdd211-21.host.com | flannel | 10.211.55.21 |
zdd211-22.host.com | flannel | 10.211.55.22 |
Note: this document uses zdd211-21.host.com as the example; the other host is installed and deployed the same way.
[root@zdd211-21 ~]# kubectl get pod -n kube-public -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-dp-5dfc689474-gwghg 1/1 Running 0 147m 172.7.21.3 zdd211-21.host.com <none> <none>
nginx-dp-5dfc689474-s8cfw 1/1 Running 0 152m 172.7.22.3 zdd211-22.host.com <none> <none>
[root@zdd211-21 ~]# ping 172.7.22.3
PING 172.7.22.3 (172.7.22.3) 56(84) bytes of data.
^C
[root@zdd211-21 ~]# cd /opt/src/
[root@zdd211-21 src]# wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@zdd211-21 src]# ll
total 452536
-rw-r--r-- 1 root root 9850227 Feb 29 23:18 etcd-v3.1.20-linux-amd64.tar.gz
-rw------- 1 root root 9565743 Feb 24 02:12 flannel-v0.11.0-linux-amd64.tar.gz
-rw------- 1 root root 443976803 Feb 24 02:16 kubernetes-server-linux-amd64-v1.15.4.tar.gz
[root@zdd211-21 src]# mkdir /opt/flannel-v0.11.0
[root@zdd211-21 src]# tar -xf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/flannel-v0.11.0/
[root@zdd211-21 src]# ln -sf /opt/flannel-v0.11.0/ /opt/flannel
[root@zdd211-21 opt]# cd /opt/flannel
[root@zdd211-21 flannel]# ll
total 34436
-rwxr-xr-x 1 root root 35249016 Jan 29 2019 flanneld
-rwxr-xr-x 1 root root 2139 Oct 23 2018 mk-docker-opts.sh
-rw-r--r-- 1 root root 4300 Oct 23 2018 README.md
[root@zdd211-21 flannel]# mkdir certs
[root@zdd211-21 flannel]# cd certs/
[root@zdd211-21 certs]# scp zdd211-200:/opt/certs/ca.pem .
root@zdd211-200's password:
ca.pem 100% 1342 1.3MB/s 00:00
[root@zdd211-21 certs]# scp zdd211-200:/opt/certs/client.pem .
root@zdd211-200's password:
client.pem 100% 1363 2.2MB/s 00:00
[root@zdd211-21 certs]# scp zdd211-200:/opt/certs/client-key.pem .
root@zdd211-200's password:
client-key.pem 100% 1675 2.7MB/s 00:00
[root@zdd211-21 certs]# ll
total 12
-rw-r--r-- 1 root root 1342 Mar 1 20:22 ca.pem
-rw------- 1 root root 1675 Mar 1 20:23 client-key.pem
-rw-r--r-- 1 root root 1363 Mar 1 20:22 client.pem
[root@zdd211-21 certs]# cd ..
[root@zdd211-21 flannel]# vim subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.21.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
[root@zdd211-21 flannel]# vim flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.211.55.21 \
--etcd-endpoints=https://10.211.55.12:2379,https://10.211.55.21:2379,https://10.211.55.22:2379 \
--etcd-keyfile=./certs/client-key.pem \
--etcd-certfile=./certs/client.pem \
--etcd-cafile=./certs/ca.pem \
--iface=eth0 \
--subnet-file=./subnet.env \
--healthz-port=2401
Note: the flannel startup script differs slightly on each host in the cluster; when deploying the other nodes, change it to use the local IP.
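For example, on zdd211-22 the two files would look roughly like this (a sketch following the pattern above, assuming that node's pod subnet is 172.7.22.1/24):
[root@zdd211-22 flannel]# vim subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.22.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
[root@zdd211-22 flannel]# vim flanneld.sh    # identical to the script above except --public-ip=10.211.55.22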
[root@zdd211-21 flannel]# chmod +x flanneld.sh
[root@zdd211-21 flannel]# mkdir -p /data/logs/flanneld
[root@zdd211-21 flannel]# cd /opt/etcd
[root@zdd211-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
[root@zdd211-21 etcd]# ./etcdctl get /coreos.com/network/config
{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}
Appendix: flannel's other network models. VxLAN model: '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'. Direct-routing model: '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN", "DirectRouting": true}}'
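If you ever wanted to switch models, you would rewrite the same etcd key and then restart flanneld on every node (e.g. via supervisorctl, as configured below); a sketch using the command pattern already shown above:
[root@zdd211-21 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "VxLAN"}}'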
[root@zdd211-21 flannel]# vim /etc/supervisord.d/flannel.ini
[program:flanneld-211-21]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@zdd211-21 etcd]# supervisorctl update
flanneld-211-21: added process group
[root@zdd211-21 etcd]# supervisorctl status
etcd-server-211-21 RUNNING pid 6457, uptime 4:46:29
flanneld-211-21 RUNNING pid 9776, uptime 0:01:51
kube-apiserver-211-21 RUNNING pid 6455, uptime 4:46:29
kube-controller-manager-211-21 RUNNING pid 6461, uptime 4:46:29
kube-kubelet-211-21 RUNNING pid 8475, uptime 4:43:26
kube-proxy-211-21 RUNNING pid 6456, uptime 4:46:29
kube-scheduler-211-21 RUNNING pid 6458, uptime 4:46:29
The other node follows the same steps; just change the addresses in the scripts. The only difference is that the etcd network config does not need to be set again.
[root@zdd211-21 ~]# ping 172.7.22.3
PING 172.7.22.3 (172.7.22.3) 56(84) bytes of data.
64 bytes from 172.7.22.3: icmp_seq=1 ttl=63 time=0.218 ms
64 bytes from 172.7.22.3: icmp_seq=2 ttl=63 time=0.446 ms
64 bytes from 172.7.22.3: icmp_seq=3 ttl=63 time=0.434 ms
64 bytes from 172.7.22.3: icmp_seq=4 ttl=63 time=0.458 ms
64 bytes from 172.7.22.3: icmp_seq=5 ttl=63 time=0.446 ms
64 bytes from 172.7.22.3: icmp_seq=6 ttl=63 time=0.215 ms
64 bytes from 172.7.22.3: icmp_seq=7 ttl=63 time=0.334 ms
^C
--- 172.7.22.3 ping statistics ---
7 packets transmitted, 7 received, 0% packet loss, time 6001ms
rtt min/avg/max/mdev = 0.215/0.364/0.458/0.102 ms
[root@zdd211-22 ~]# ping 172.7.21.3
PING 172.7.21.3 (172.7.21.3) 56(84) bytes of data.
64 bytes from 172.7.21.3: icmp_seq=1 ttl=63 time=0.242 ms
64 bytes from 172.7.21.3: icmp_seq=2 ttl=63 time=0.461 ms
64 bytes from 172.7.21.3: icmp_seq=3 ttl=63 time=0.301 ms
64 bytes from 172.7.21.3: icmp_seq=4 ttl=63 time=0.395 ms
^C
--- 172.7.21.3 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 2999ms
rtt min/avg/max/mdev = 0.242/0.349/0.461/0.087 ms
Note: the iptables rules differ slightly from host to host; adjust them when running these commands on the other compute nodes.
[root@zdd211-21 ~]# yum install -y iptables-services
[root@zdd211-21 ~]# systemctl start iptables.service && systemctl enable iptables.service
Created symlink from /etc/systemd/system/basic.target.wants/iptables.service to /usr/lib/systemd/system/iptables.service.
[root@zdd211-21 ~]# iptables-save |grep -i reject
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@zdd211-21 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@zdd211-21 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@zdd211-21 ~]# iptables-save |grep -i postrouting
:POSTROUTING ACCEPT [17:1032]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE
[root@zdd211-21 ~]# iptables -t nat -D POSTROUTING -s 172.7.21.0/24 ! -o docker0 -j MASQUERADE
[root@zdd211-21 ~]# iptables -t nat -I POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@zdd211-21 ~]# iptables-save |grep -i postrouting
:POSTROUTING ACCEPT [2:120]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.7.21.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE
On host 10.211.55.21: traffic sourced from the docker subnet 172.7.21.0/24 is SNATed only when its destination is outside 172.7.0.0/16 and it does not leave through the docker0 bridge.
[root@zdd211-21 ~]# iptables-save >/etc/sysconfig/iptables
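The corresponding adjustment on zdd211-22 (after installing and enabling iptables-services there as well) swaps in that node's docker subnet; a sketch assuming 172.7.22.0/24:
[root@zdd211-22 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@zdd211-22 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@zdd211-22 ~]# iptables -t nat -D POSTROUTING -s 172.7.22.0/24 ! -o docker0 -j MASQUERADE
[root@zdd211-22 ~]# iptables -t nat -I POSTROUTING -s 172.7.22.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@zdd211-22 ~]# iptables-save >/etc/sysconfig/iptables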
Note: the DNS inside K8S is not a silver bullet! It should only be responsible for automatically maintaining the mapping between service names and cluster IPs.
Only the ops host zdd211-200.host.com needs an nginx virtual host, which serves as the unified access entry point for K8S resource manifests.
[root@zdd211-200 ~]# vim /etc/nginx/conf.d/k8s-yaml.od.com.conf
server {
    listen       80;
    server_name  k8s-yaml.od.com;

    location / {
        autoindex on;
        default_type text/plain;
        root /data/k8s-yaml;
    }
}
[root@zdd211-200 ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@zdd211-200 ~]# nginx -s reload
[root@zdd211-200 ~]# cd /data/k8s-yaml/
[root@zdd211-200 k8s-yaml]# mkdir coredns
On zdd211-11.host.com:
[root@zdd211-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2020022103 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.211.55.11
harbor          A       10.211.55.200
k8s-yaml        A       10.211.55.200
[root@zdd211-11 ~]# systemctl restart named
[root@zdd211-11 ~]# dig -t A k8s-yaml.od.com @10.211.55.11 +short
10.211.55.200
Open a browser and visit the URL k8s-yaml.od.com; the autoindex directory listing should appear.

[root@zdd211-200 ~]# docker pull coredns/coredns:1.6.1
[root@zdd211-200 ~]# docker images|grep coredns
coredns/coredns 1.6.1 c0f6e815079e 7 months ago 42.2MB
[root@zdd211-200 ~]# docker tag c0f6e815079e harbor.od.com/public/coredns:v1.6.1
[root@zdd211-200 ~]# docker push harbor.od.com/public/coredns:v1.6.1
On zdd211-200.host.com:
[root@zdd211-200 coredns]# pwd
/data/k8s-yaml/coredns
[root@zdd211-200 coredns]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
[root@zdd211-200 coredns]# vim cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        log
        health
        ready
        kubernetes cluster.local 192.168.0.0/16
        forward . 10.211.55.11
        cache 30
        loop
        reload
        loadbalance
    }
[root@zdd211-200 coredns]# vim dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: coredns
  template:
    metadata:
      labels:
        k8s-app: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      containers:
      - name: coredns
        image: harbor.od.com/public/coredns:v1.6.1
        args:
        - -conf
        - /etc/coredns/Corefile
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile
[root@zdd211-200 coredns]# vim svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
  clusterIP: 192.168.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
  - name: metrics
    port: 9153
    protocol: TCP
[root@zdd211-200 coredns]# ls -l
total 12
-rw-r--r-- 1 root root 322 Mar 1 23:25 cm.yaml
-rw-r--r-- 1 root root 1294 Mar 1 23:23 dp.yaml
-rw-r--r-- 1 root root 0 Mar 1 23:20 rbac.yaml
-rw-r--r-- 1 root root 387 Mar 1 23:25 svc.yaml
On any compute node, here zdd211-21.host.com:
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/rbac.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/cm.yaml
configmap/coredns created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/dp.yaml
deployment.apps/coredns created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/coredns/svc.yaml
service/coredns created
[root@zdd211-21 ~]# kubectl get all -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-6b6c4f9648-5sclg 1/1 Running 0 38s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 30s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/coredns 1/1 1 1 38s
NAME DESIRED CURRENT READY AGE
replicaset.apps/coredns-6b6c4f9648 1 1 1 38s
[root@zdd211-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short
www.a.shifen.com.
61.135.169.121
61.135.169.125
[root@zdd211-21 ~]# kubectl get pods,svc -n kube-public
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-gwghg 1/1 Running 0 5h20m
pod/nginx-dp-5dfc689474-s8cfw 1/1 Running 0 5h26m
[root@zdd211-21 ~]# kubectl expose deployment nginx-dp --port 80 -n kube-public
service/nginx-dp exposed
[root@zdd211-21 ~]# kubectl get pods,svc -n kube-public
NAME READY STATUS RESTARTS AGE
pod/nginx-dp-5dfc689474-gwghg 1/1 Running 0 5h21m
pod/nginx-dp-5dfc689474-s8cfw 1/1 Running 0 5h27m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx-dp ClusterIP 192.168.50.46 <none> 80/TCP 5s
[root@zdd211-21 ~]# dig -t A nginx-dp.kube-public.svc.cluster.local. @192.168.0.2 +short
192.168.50.46
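From inside a pod, the short name also resolves, because the kubelet points containers at the coredns Service IP and adds the cluster search domains (assuming the kubelets in this setup were started with --cluster-dns=192.168.0.2 and --cluster-domain=cluster.local); the nameserver line should show 192.168.0.2:
[root@zdd211-21 ~]# kubectl exec -it nginx-dp-5dfc689474-gwghg -n kube-public bash
root@nginx-dp-5dfc689474-gwghg:/# cat /etc/resolv.conf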
[github]:https://github.com/containous/traefik
[root@zdd211-200 coredns]# docker pull traefik:v1.7.2-alpine
[root@zdd211-200 coredns]# docker images|grep traefik
traefik v1.7.2-alpine add5fac61ae5 17 months ago 72.4MB
[root@zdd211-200 coredns]# docker tag add5fac61ae5 harbor.od.com/public/traefik:v1.7.2
[root@zdd211-200 coredns]# docker push harbor.od.com/public/traefik:v1.7.2
The ingress controller is exposed on each node's host port (hostPort 81 in the DaemonSet below) rather than through a NodePort Service.
On zdd211-200.host.com:
[root@zdd211-200 coredns]# cd ..
[root@zdd211-200 k8s-yaml]# mkdir traefik
[root@zdd211-200 k8s-yaml]# cd traefik/
[root@zdd211-200 traefik]# vim rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
[root@zdd211-200 traefik]# vi ds.yaml
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81
        - name: admin-web
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.211.55.10:7443
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
[root@zdd211-200 traefik]# vim svc.yaml
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
  - protocol: TCP
    port: 80
    name: controller
  - protocol: TCP
    port: 8080
    name: admin-web
[root@zdd211-200 traefik]# vim ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
[root@zdd211-200 traefik]# ls -l
total 16
-rw-r--r-- 1 root root 1099 Mar 1 23:56 ds.yaml
-rw-r--r-- 1 root root 334 Mar 2 00:00 ingress.yaml
-rw-r--r-- 1 root root 800 Mar 1 23:53 rbac.yaml
-rw-r--r-- 1 root root 269 Mar 1 23:57 svc.yaml
On any compute node, here zdd211-21.host.com:
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/rbac.yaml
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ds.yaml
daemonset.extensions/traefik-ingress created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/svc.yaml
service/traefik-ingress-service created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/traefik/ingress.yaml
ingress.extensions/traefik-web-ui created
[root@zdd211-21 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-5sclg 1/1 Running 0 40m
traefik-ingress-9927c 0/1 ContainerCreating 0 9m36s
traefik-ingress-sqt2n 0/1 ContainerCreating 0 9m36s
## The pods never come up; check the pod description to troubleshoot
[root@zdd211-21 ~]# kubectl describe pods traefik-ingress-sqt2n -n kube-system
。。。。。。。
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 10m default-scheduler Successfully assigned kube-system/traefik-ingress-sqt2n to zdd211-22.host.com
Warning FailedCreatePodSandBox 10m kubelet, zdd211-22.host.com Failed create pod sandbox: rpc error: code = Unknown desc = failed to start sandbox container for pod "traefik-ingress-sqt2n": Error response from daemon: driver failed programming external connectivity on endpoint k8s_POD_traefik-ingress-sqt2n_kube-system_8bd9dde1-4e55-48e1-bbbc-d5c00643956a_0 (b4a18c43d70f2084269cd3a0f4fbfefab137fe9da9b8134cf52314dfa78293f5): (iptables failed: iptables --wait -t filter -A DOCKER ! -i docker0 -o docker0 -p tcp -d 172.7.22.4 --dport 80 -j ACCEPT: iptables: No chain/target/match by that name.
(exit status 1))
。。。。。。
## The events show an iptables error: starting iptables.service earlier removed the DOCKER chain that docker had created, so restart docker on both compute nodes to recreate its iptables rules
[root@zdd211-21 ~]# systemctl restart docker
[root@zdd211-22 ~]# systemctl restart docker
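After the restart you can confirm the DOCKER filter chain is back before re-checking the pods (an optional verification, not in the original steps):
[root@zdd211-21 ~]# iptables -t filter -nL DOCKER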
[root@zdd211-21 ~]# kubectl get pod,svc,ingress -n kube-system
NAME READY STATUS RESTARTS AGE
pod/coredns-6b6c4f9648-5sclg 1/1 Running 0 81m
pod/traefik-ingress-9927c 1/1 Running 0 50m
pod/traefik-ingress-sqt2n 1/1 Running 0 50m
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/coredns ClusterIP 192.168.0.2 <none> 53/UDP,53/TCP,9153/TCP 81m
service/traefik-ingress-service ClusterIP 192.168.69.149 <none> 80/TCP,8080/TCP 50m
NAME HOSTS ADDRESS PORTS AGE
ingress.extensions/traefik-web-ui traefik.od.com 80 45m
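Even before the DNS record exists, you can verify the routing path by hitting either node's host port 81 with the right Host header (the port comes from the DaemonSet's hostPort above); traefik should answer with its web UI or a redirect to /dashboard/ (a quick check, not part of the original steps):
[root@zdd211-21 ~]# curl -H 'Host: traefik.od.com' http://10.211.55.21:81/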
On zdd211-11.host.com:
[root@zdd211-11 ~]# vim /var/named/od.com.zone
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2020022104 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.211.55.11
harbor          A       10.211.55.200
k8s-yaml        A       10.211.55.200
traefik         A       10.211.55.10
[root@zdd211-11 ~]# systemctl restart named
Nginx must be configured on both zdd211-11.host.com and zdd211-12.host.com; consider using SaltStack or Ansible to manage the configuration uniformly.
[root@zdd211-12 ~]# vim /etc/nginx/conf.d/od.com.conf
upstream default_backend_traefik {
    server 10.211.55.21:81    max_fails=3 fail_timeout=10s;
    server 10.211.55.22:81    max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@zdd211-12 ~]# nginx -s reload
On zdd211-200:
# Prepare the image
# k8s.gcr.io is unreachable from here, so pull from registry.aliyuncs.com/google_containers instead
[root@zdd211-200 ~]# docker pull registry.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1
[root@zdd211-200 ~]# docker image tag f9aed6605b81 harbor.od.com/public/kubernetes-dashboard-amd64:v1.10.1
[root@zdd211-200 ~]# docker image push harbor.od.com/public/kubernetes-dashboard-amd64:v1.10.1
On zdd211-200:
The manifests are stored under zdd211-200:/data/k8s-yaml/dashboard/dashboard_1.10.1
[root@zdd211-200 k8s-yaml]# pwd
/data/k8s-yaml
[root@zdd211-200 k8s-yaml]# mkdir -p dashboard/dashboard_1.10.1
[root@zdd211-200 k8s-yaml]# cd dashboard/dashboard_1.10.1/
[root@zdd211-200 dashboard_1.10.1]#
The following is the dashboard's default (cluster-admin) RBAC:
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
[root@zdd211-200 k8s-yaml]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
[root@zdd211-200 k8s-yaml]# vi deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/kubernetes-dashboard-amd64:v1.10.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
[root@zdd211-200 k8s-yaml]# vi service.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
[root@zdd211-200 k8s-yaml]# vi ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
[root@zdd211-200 dashboard_1.10.1]# ll
total 16
-rw-r--r-- 1 root root 1393 Mar 2 01:20 deployment.yaml
-rw-r--r-- 1 root root 318 Mar 2 01:22 ingress.yaml
-rw-r--r-- 1 root root 1548 Mar 2 01:20 rbac.yaml
-rw-r--r-- 1 root root 322 Mar 2 01:23 service.yaml
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_1.10.1/rbac.yaml
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_1.10.1/deployment.yaml
deployment.apps/kubernetes-dashboard created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_1.10.1/service.yaml
service/kubernetes-dashboard created
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/dashboard/dashboard_1.10.1/ingress.yaml
ingress.extensions/kubernetes-dashboard created
On zdd211-11:
[root@zdd211-11 ~]# vim /var/named/od.com.zone
$ORIGIN od.com.
$TTL 600        ; 10 minutes
@               IN SOA  dns.od.com. dnsadmin.od.com. (
                        2020022105 ; serial
                        10800      ; refresh (3 hours)
                        900        ; retry (15 minutes)
                        604800     ; expire (1 week)
                        86400      ; minimum (1 day)
                        )
                NS      dns.od.com.
$TTL 60 ; 1 minute
dns             A       10.211.55.11
harbor          A       10.211.55.200
k8s-yaml        A       10.211.55.200
traefik         A       10.211.55.10
dashboard       A       10.211.55.10
[root@zdd211-11 ~]# systemctl restart named.service
Create the certificate directory on both zdd211-11 and zdd211-12:
[root@zdd211-11 ~]# mkdir /etc/nginx/certs/
On zdd211-200:
[root@zdd211-200 ~]# cd /opt/certs/
[root@zdd211-200 certs]# (umask 077; openssl genrsa -out dashboard.od.com.key 2048)
[root@zdd211-200 certs]# openssl req -new -key dashboard.od.com.key -out dashboard.od.com.csr -subj "/CN=dashboard.od.com/C=CN/ST=BJ/L=Beijing/O=OldboyEdu/OU=ops"
[root@zdd211-200 certs]# openssl x509 -req -in dashboard.od.com.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out dashboard.od.com.crt -days 3650
[root@zdd211-200 certs]# ll dashboard.od.com.*
-rw-r--r-- 1 root root 1196 Jan 29 20:52 dashboard.od.com.crt
-rw-r--r-- 1 root root 1005 Jan 29 20:51 dashboard.od.com.csr
-rw------- 1 root root 1675 Jan 29 20:51 dashboard.od.com.key
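Optionally, sanity-check the signed certificate before copying it out:
[root@zdd211-200 certs]# openssl x509 -in dashboard.od.com.crt -noout -subject -dates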
[root@zdd211-200 certs]# scp dashboard.od.com.key dashboard.od.com.crt zdd211-11:/etc/nginx/certs/
[root@zdd211-200 certs]# scp dashboard.od.com.key dashboard.od.com.crt zdd211-12:/etc/nginx/certs/
Both zdd211-11 and zdd211-12 need the following nginx configuration
[root@zdd211-11 ~]# vim /etc/nginx/conf.d/dashboard.conf
server {
    listen       80;
    server_name  dashboard.od.com;

    rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
    listen       443 ssl;
    server_name  dashboard.od.com;

    ssl_certificate "certs/dashboard.od.com.crt";
    ssl_certificate_key "certs/dashboard.od.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout  10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host       $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@zdd211-11 ~]# nginx -t && nginx -s reload
Firefox is recommended for opening the dashboard.
[root@zdd211-21 ~]# kubectl get secret -n kube-system|grep kubernetes-dashboard-admin
kubernetes-dashboard-admin-token-h68bw kubernetes.io/service-account-token 3 17m
[root@zdd211-21 ~]# kubectl describe secret kubernetes-dashboard-admin-token-h68bw -n kube-system|grep token
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1ocjVyaiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImZhNzAxZTRmLWVjMGItNDFkNS04NjdmLWY0MGEwYmFkMjFmNSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.SDUZEkH_N0B6rjm6bW_jN03F4pHCPafL3uKD2HU0ksM0oenB2425jxvfi16rUbTRCsfcGqYXRrE2x15gpb03fb3jJy-IhnInUnPrw6ZwEdqWagen_Z4tdFhUgCpdjdShHy40ZPfql_iuVKbvv7ASt8w8v13Ar3FxztyDyLScVO3rNEezT7JUqMI4yj5LYQ0IgpSXoH12tlDSTyX8Rk2a_3QlOM_yT5GB_GEZkwIESttQKVr7HXSCrQ2tEdYA4cYO2AbF1NgAo_CVBNNvZLvdDukWiQ_b5zwOiO0cUbbiu46x_p6gjNWzVb7zHNro4gh0Shr4hIhiRQot2DJ-sq94Ag
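Instead of eyeballing the describe output, the token can also be extracted directly (the secret name comes from the previous command):
[root@zdd211-21 ~]# kubectl -n kube-system get secret kubernetes-dashboard-admin-token-h68bw -o jsonpath='{.data.token}' | base64 -d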
* Prepare the heapster image
[root@zdd211-200 k8s-yaml]# mkdir heapster
[root@zdd211-200 k8s-yaml]# cd heapster/
[root@zdd211-200 heapster]# docker pull quay.io/bitnami/heapster:1.5.4
[root@zdd211-200 heapster]# docker images |grep heapster
quay.io/bitnami/heapster 1.5.4 c359b95ad38b 12 months ago 136MB
[root@zdd211-200 heapster]# docker tag c359b95ad38b harbor.od.com/public/heapster:v1.5.4
[root@zdd211-200 heapster]# docker push harbor.od.com/public/heapster:v1.5.4
[root@zdd211-200 heapster]# vim heapster.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: harbor.od.com/public/heapster:v1.5.4
        imagePullPolicy: IfNotPresent
        command:
        - /opt/bitnami/heapster/bin/heapster
        - --source=kubernetes:https://kubernetes.default
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
[root@zdd211-21 ~]# kubectl apply -f http://k8s-yaml.od.com/heapster/heapster.yaml
[root@zdd211-21 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-5sclg 1/1 Running 0 3h57m
heapster-7cb6dc7b94-vmcf5 1/1 Running 0 5s
kubernetes-dashboard-747c5cdb5d-h7w4c 1/1 Running 0 80m
traefik-ingress-9927c 1/1 Running 0 3h26m
traefik-ingress-sqt2n 1/1 Running 0 3h26m
After logging in again, the dashboard shows pod resource usage. An earlier article covered using Prometheus to monitor cluster resources in K8S.