Create: kubectl create -f xxx.yaml
Query:
kubectl get pod yourPodName
kubectl describe pod yourPodName    // I usually use this to troubleshoot a pod that is inexplicably not Running
Delete: kubectl delete pod yourPodName
Update: kubectl replace -f /path/to/yourNewYaml.yaml
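One note on updating: if the change touches fields that cannot be updated in place, a plain replace will be rejected; the --force variant deletes and recreates the object instead (use with care, since the old pod is removed first):
kubectl replace --force -f /path/to/yourNewYaml.yaml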
[root@master1 ~]# vim pod-redis.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-redis
  labels:
    name: redis
spec:
  restartPolicy: Always          # keep the container running; the default k8s policy: if the container exits, an identical one is created immediately
  nodeSelector:
    zone: node1                  # node selection; first label the node: kubectl label nodes kube-node1 zone=node1
  containers:
  - name: pod-redis
    image: docker.io/redis
    imagePullPolicy: Always      # three options: Always, Never, IfNotPresent; the policy for checking/pulling the image from the registry on each start
    ports:
    - containerPort: 6379        # port exposed by the container
      hostPort: 6379             # port mapped on the host (usually can be omitted)
After editing the YAML file above, run the following commands (the label key/value must match the nodeSelector, zone: node1):
[root@master1 ~]# kubectl label nodes node1 zone=node1
node "node1" labeled
[root@master1 ~]# kubectl label nodes node2 zone=node2
node "node2" labeled
[root@master1 ~]# kubectl create -f pod-redis.yaml
pod "pod-redis" created
I ran into a few small problems. Right after creating the pod, it was stuck in the ContainerCreating state, i.e. the container could not start. Analyze and track down the cause with kubectl describe pod xxx, which showed:
details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)
The fix is to install the rhsm packages that provide the missing Red Hat CA certificate:
yum install *rhsm* -y
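After the install it is worth confirming that the certificate file from the error message now exists, then recreating the pod; a quick check:
ls -l /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt
kubectl delete pod pod-redis
kubectl create -f pod-redis.yaml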
The second problem:
[root@master1 ~]# kubectl describe pod pod-redis
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
4m 4m 1 {default-scheduler } Normal Scheduled Successfully assigned pod-redis to node1
2m 2m 1 {kubelet node1} Warning MissingClusterDNS kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
2m 1s 13 {kubelet node1} spec.containers{pod-redis} Warning ErrImageNeverPull Container image "docker.io/redis" is not present with pull policy of Never
2m 1s 13 {kubelet node1} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "pod-redis" with ErrImageNeverPull: "Container image \"docker.io/redis\" is not present with pull policy of Never"
The key line is: ErrImageNeverPull Container image "docker.io/redis" is not present with pull policy of Never
This error happened because, while editing the YAML file earlier, I had changed imagePullPolicy from Always to Never, so the kubelet refused to pull the image that was not present locally.
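The fix is a one-line change back in pod-redis.yaml; since a running pod does not pick up a spec change like this, delete and recreate it afterwards:
imagePullPolicy: Always
kubectl delete pod pod-redis
kubectl create -f pod-redis.yaml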
[root@master1 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
pod-redis 1/1 Running 0 1h
[root@master1 ~]# kubectl describe pod pod-redis
Name: pod-redis
Namespace: default
Node: node1/192.168.161.163
Start Time: Wed, 28 Feb 2018 14:23:55 +0800
Labels: name=redis
Status: Running
IP: 10.0.8.2
Controllers: <none>
Containers:
pod-redis:
Container ID: docker://34535bd0ed41b35c85ed0285c51e953b8557d6520a85151813334e5624172021
Image: docker.io/redis
Image ID: docker-pullable://docker.io/[email protected]:e55dff3a21a0e7ba25e91925ed0d926d959dac09f9099fd1bcc919263305f1e4
Port: 6379/TCP
State: Running
Started: Wed, 28 Feb 2018 15:33:35 +0800
Ready: True
Restart Count: 0
Volume Mounts: <none>
Environment Variables: <none>
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
No volumes.
QoS Class: BestEffort
Tolerations: <none>
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
1h 42m 2 {kubelet node1} Warning MissingClusterDNS kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
42m 42m 1 {kubelet node1} spec.containers{pod-redis} Normal Pulled Successfully pulled image "docker.io/redis"
42m 42m 1 {kubelet node1} spec.containers{pod-redis} Normal Created Created container with docker id 34535bd0ed41; Security:[seccomp=unconfined]
42m 42m 1 {kubelet node1} spec.containers{pod-redis} Normal Started Started container with docker id 34535bd0ed41
Telnet to port 6379 on the node:
[root@master1 ~]# telnet 192.168.161.163 6379
Trying 192.168.161.163...
Connected to 192.168.161.163.
Escape character is '^]'.
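Since the port answers, you can go a step further and speak the Redis protocol to it; a minimal check, assuming redis-cli happens to be installed on the machine you test from:
redis-cli -h 192.168.161.163 -p 6379 ping    # a healthy server replies PONG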
[root@master1 ~]# ping 10.0.8.2
PING 10.0.8.2 (10.0.8.2) 56(84) bytes of data.
64 bytes from 10.0.8.2: icmp_seq=1 ttl=61 time=60.8 ms
64 bytes from 10.0.8.2: icmp_seq=2 ttl=61 time=1.01 ms
^C
--- 10.0.8.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 1.012/30.942/60.873/29.931 ms
Check the routing table:
[root@master1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
0.0.0.0 192.168.161.2 0.0.0.0 UG 100 0 0 ens33
10.0.0.0 0.0.0.0 255.255.0.0 U 0 0 0 flannel0
10.0.57.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0
192.168.161.0 0.0.0.0 255.255.255.0 U 100 0 0 ens33
[root@master1 ~]# vim rc-nginx.yaml
apiVersion: v1                   # API version; must be one of the values listed by kubectl api-versions
kind: ReplicationController      # the role/type of resource to create
metadata:                        # the resource's metadata/attributes
  name: rc-nginx-3               # resource name; must be unique within a namespace
spec:
  replicas: 3                    # replica count: 3
  template:                      # the Pod definition
    metadata:
      labels:                    # the Pod's labels; when spec.selector is omitted, the RC's selector defaults to these labels
        app: nginx-3
    spec:                        # specification of the resource content
      restartPolicy: Always      # keep the container running; the default k8s policy: if the container exits, an identical one is created immediately
      containers:
      - name: nginx-3            # container name
        image: docker.io/nginx   # image used by the container
        ports:
        - containerPort: 80      # port exposed by the container
After editing the YAML file above, run the following commands:
[root@master1 ~]# kubectl create -f rc-nginx.yaml
replicationcontroller "rc-nginx-3" created
[root@master1 ~]# kubectl get rc
NAME DESIRED CURRENT READY AGE
rc-nginx-3 3 3 2 30s
[root@master1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
pod-redis 1/1 Running 0 2h
rc-nginx-3-1mrzc 0/1 ContainerCreating 0 1m
rc-nginx-3-ll60q 1/1 Running 0 1m
rc-nginx-3-pf4cb 1/1 Running 0 1m
[root@master1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE
pod-redis 1/1 Running 0 2h 10.0.8.2 node1
rc-nginx-3-1mrzc 0/1 ImagePullBackOff 0 5m 10.0.8.3 node1
rc-nginx-3-ll60q 1/1 Running 0 5m 10.0.98.3 node2
rc-nginx-3-pf4cb 1/1 Running 0 5m 10.0.98.2 node2
One of the pods above is still being created; it is probably pulling the image. Let's take a look:
[root@master1 ~]# kubectl describe pod rc-nginx-3-1mrzc
Events:
FirstSeen LastSeen Count From SubObjectPath Type Reason Message
--------- -------- ----- ---- ------------- -------- ------ -------
1m 1m 1 {default-scheduler } Normal Scheduled Successfully assigned rc-nginx-3-1mrzc to node1
1m 1m 1 {kubelet node1} Warning MissingClusterDNS kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to DNSDefault policy.
1m 1m 1 {kubelet node1} spec.containers{nginx-3} Normal Pulling pulling image "docker.io/nginx"
Still pulling the image...
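To watch the pull finish without rerunning describe, you can watch the pod list from the master, or check the image cache on the node directly:
kubectl get pods -w
docker images | grep nginx    # run this one on node1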
Another situation I have run into: when resources in production are running a little short, the quickest "temporary" fix is to control how many pods run by changing the replicas count:
kubectl scale rc rc-nginx-3 --replicas=1    // scale the replica count down to 1
kubectl scale rc rc-nginx-3 --replicas=5    // scale the replica count up to 5
[root@master1 ~]# kubectl scale rc rc-nginx-3 --replicas=1
replicationcontroller "rc-nginx-3" scaled
[root@master1 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
pod-redis 1/1 Running 0 2h
rc-nginx-3-ll60q 1/1 Running 0 17m
Note: if the RC was created in a specific namespace, you need to add the --namespace flag after rc:
kubectl scale rc --namespace=kube-system rc-nginx-3 --replicas=1
A few days ago I discovered an even better feature:
scale makes it convenient to grow or shrink the replica count, but it still needs manual intervention; it cannot adjust the replicas automatically in real time based on system load. The autoscale command provides exactly that: automatic scaling of the replicas according to pod load.
autoscale gives an RC a range for its replica count, and at runtime the pods are scaled up or down within that range based on the load of the programs running in them. For the nginx RC created earlier, the following command sets the range to 1~3:
[root@master1 ~]# kubectl autoscale rc rc-nginx-3 --min=1 --max=3
replicationcontroller "rc-nginx-3" autoscaled
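kubectl autoscale creates a HorizontalPodAutoscaler object named after the RC, which can be inspected like any other resource; note that actual CPU-based scaling also needs a metrics source (Heapster on clusters of this vintage), and a CPU target can be set explicitly:
kubectl get hpa
kubectl describe hpa rc-nginx-3
kubectl autoscale rc rc-nginx-3 --min=1 --max=3 --cpu-percent=80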
A Service can also be defined in YAML; note that the selector must match the Pod labels set in the RC template above (app: nginx-3):
apiVersion: v1
kind: Service
metadata:
  name: kube-system
  labels:
    name: kube-system
spec:
  ports:
  - port: 8081
    targetPort: 80
    protocol: TCP
  selector:
    app: nginx-3
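To create the Service from this YAML and check that it got a cluster IP (assuming the file is saved as service-nginx.yaml, a name used here just for illustration):
[root@master1 ~]# kubectl create -f service-nginx.yaml
[root@master1 ~]# kubectl get svc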
Personally I do not find this creation method very common, because usually, to let client applications reach the Pod instances, you can create the Service directly with the kubectl expose command:
// Create a service for the nginx RC; --port is the Service port and --target-port the container port (the nginx container above listens on 80).
kubectl expose rc rc-nginx-3 --port=80 --target-port=80
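A quick way to confirm the Service has found the RC's pods (the Service name follows the RC name; the Endpoints line should list the pod IPs):
kubectl get svc rc-nginx-3
kubectl describe svc rc-nginx-3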
Finally, a fully annotated Pod YAML that covers the common fields:
apiVersion: v1                        # API version; must be one of the values listed by kubectl api-versions
kind: Pod                             # the role/type of resource to create
metadata:                             # the resource's metadata/attributes
  name: web04-pod                     # resource name; must be unique within a namespace
  labels:                             # resource labels; for details see http://blog.csdn.net/liyingke112/article/details/77482384
    k8s-app: apache
    version: v1
    kubernetes.io/cluster-service: "true"
  annotations:                        # optional custom annotations; a key/value string map
    description: "example annotation" # placeholder entry
spec:                                 # specification of the resource content
  restartPolicy: Always               # keep the container running; the default k8s policy: if the container exits, an identical one is created immediately
  nodeSelector:                       # node selection; first label the node: kubectl label nodes kube-node1 zone=node1
    zone: node1
  containers:
  - name: web04-pod                   # container name
    image: web:apache                 # image used by the container
    imagePullPolicy: Never            # policy for checking/pulling the image from the registry on start; three options:
                                      #   Always: check on every start
                                      #   Never: never check (regardless of whether the image exists locally)
                                      #   IfNotPresent: skip the check if the image exists locally, pull it if not
    command: ['sh']                   # command run at container start; overrides the image's ENTRYPOINT (Dockerfile ENTRYPOINT)
    args: ["$(str)"]                  # arguments for the command; corresponds to the Dockerfile CMD
    env:                              # environment variables for the container
    - name: str                       # variable name
      value: "/etc/run.sh"            # variable value
    resources:                        # resource management; for details see http://blog.csdn.net/liyingke112/article/details/77452630
      requests:                       # minimum resources the container needs to run normally
        cpu: 0.1                      # CPU (cores); either a float or integer+m, 0.1 = 100m; minimum is 0.001 cores (1m)
        memory: 32Mi                  # memory usage
      limits:                         # resource limits
        cpu: 0.5
        memory: 32Mi
    ports:
    - containerPort: 80               # port exposed by the container
      name: httpd                     # port name
      protocol: TCP
    livenessProbe:                    # health check for containers in the pod; for details see http://blog.csdn.net/liyingke112/article/details/77531584
      httpGet:                        # check health via HTTP GET; a response code in 200-399 counts as healthy
        path: /                       # URI path
        port: 80
        #host: 127.0.0.1              # host address
        scheme: HTTP
      initialDelaySeconds: 180        # how long after container start the first check runs
      timeoutSeconds: 5               # check timeout
      periodSeconds: 15               # interval between checks
      # Alternatively:
      #exec:                          # check by running a command; exit code 0 means the container is healthy
      #  command:
      #  - cat
      #  - /tmp/health
      # Or:
      #tcpSocket:                     # check health via a TCP socket
      #  port: number
    lifecycle:                        # lifecycle management
      postStart:                      # task run right after the container starts
        exec:
          command: ['sh', '-c', 'yum upgrade -y']
      preStop:                        # task run before the container stops
        exec:
          command: ['sh', '-c', 'service httpd stop']
    volumeMounts:                     # for details see http://blog.csdn.net/liyingke112/article/details/76577520
    - name: volume                    # name of the mount; must match volumes[*].name
      mountPath: /data                # path inside the container to mount at
      readOnly: true
  volumes:                            # define a set of volumes
  - name: volume                      # name of a volume
    #emptyDir: {}
    hostPath:
      path: /opt                      # volume type hostPath, pointing at /opt on the host; many other volume types are supported
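Before creating a pod from a long spec like this, a client-side check saves a round trip; a quick sketch, assuming the file is saved as web04-pod.yaml (a name used here for illustration):
kubectl create -f web04-pod.yaml --dry-run    # parse and print only, without sending it to the apiserver
kubectl create -f web04-pod.yaml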