I created a Kubernetes cluster with 1 master node and 2 worker nodes. When I created a Pod, everything succeeded at first, but later I found the Pod was not running properly: from one minute to the next it crashes and goes into CrashLoopBackOff, and when I try to view its logs I get another error: "Error from server: dial tcp xx.xx.xx.xx:10250: connect: no route to host".
I am using CentOS 7.
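The error mentions port 10250, which as far as I understand is the kubelet port that the API server has to reach on the worker node in order to fetch logs, so I assume "no route to host" means the master cannot reach worker-node1 on that port (CentOS 7 uses firewalld, which might be blocking it). Roughly the checks I intend to run (10.66.12.204 is the worker's IP, as shown in the output below):

# on the master: test whether the kubelet port on worker-node1 is reachable at all
curl -k https://10.66.12.204:10250/

# on worker-node1: check whether firewalld is active and which ports are open
systemctl status firewalld
firewall-cmd --list-ports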
[root@master-node ~]# kubectl get pods --output=yaml
apiVersion: v1
items:
- apiVersion: v1
  kind: Pod
  metadata:
    annotations:
      cni.projectcalico.org/podIP: 192.168.180.194/32
      cni.projectcalico.org/podIPs: 192.168.180.194/32
    creationTimestamp: "2020-08-27T13:26:30Z"
    labels:
      run: gateway-pod
    managedFields:
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:labels:
            .: {}
            f:run: {}
        f:spec:
          f:containers:
            k:{"name":"gateway-pod"}:
              .: {}
              f:image: {}
              f:imagePullPolicy: {}
              f:name: {}
              f:resources: {}
              f:terminationMessagePath: {}
              f:terminationMessagePolicy: {}
          f:dnsPolicy: {}
          f:enableServiceLinks: {}
          f:restartPolicy: {}
          f:schedulerName: {}
          f:securityContext: {}
          f:terminationGracePeriodSeconds: {}
      manager: kubectl
      operation: Update
      time: "2020-08-27T13:26:30Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:metadata:
          f:annotations:
            .: {}
            f:cni.projectcalico.org/podIP: {}
            f:cni.projectcalico.org/podIPs: {}
      manager: calico
      operation: Update
      time: "2020-08-27T13:26:32Z"
    - apiVersion: v1
      fieldsType: FieldsV1
      fieldsV1:
        f:status:
          f:conditions:
            k:{"type":"ContainersReady"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Initialized"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
            k:{"type":"Ready"}:
              .: {}
              f:lastProbeTime: {}
              f:lastTransitionTime: {}
              f:status: {}
              f:type: {}
          f:containerStatuses: {}
          f:hostIP: {}
          f:phase: {}
          f:podIP: {}
          f:podIPs:
            .: {}
            k:{"ip":"192.168.180.194"}:
              .: {}
              f:ip: {}
          f:startTime: {}
      manager: kubelet
      operation: Update
      time: "2020-08-27T22:59:35Z"
    name: gateway-pod
    namespace: default
    resourceVersion: "823325"
    selfLink: /api/v1/namespaces/default/pods/gateway-pod
    uid: 8b3cc542-2f55-4b67-9145-a76504ceda24
  spec:
    containers:
    - image: docker.io/oussemabhouri/gateway
      imagePullPolicy: Always
      name: gateway-pod
      resources: {}
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: File
      volumeMounts:
      - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
        name: default-token-zzxng
        readOnly: true
    dnsPolicy: ClusterFirst
    enableServiceLinks: true
    nodeName: worker-node1
    priority: 0
    restartPolicy: Always
    schedulerName: default-scheduler
    securityContext: {}
    serviceAccount: default
    serviceAccountName: default
    terminationGracePeriodSeconds: 30
    tolerations:
    - effect: NoExecute
      key: node.kubernetes.io/not-ready
      operator: Exists
      tolerationSeconds: 300
    - effect: NoExecute
      key: node.kubernetes.io/unreachable
      operator: Exists
      tolerationSeconds: 300
    volumes:
    - name: default-token-zzxng
      secret:
        defaultMode: 420
        secretName: default-token-zzxng
  status:
    conditions:
    - lastProbeTime: null
      lastTransitionTime: "2020-08-27T13:26:21Z"
      status: "True"
      type: Initialized
    - lastProbeTime: null
      lastTransitionTime: "2020-08-27T22:59:25Z"
      status: "True"
      type: Ready
    - lastProbeTime: null
      lastTransitionTime: "2020-08-27T22:59:25Z"
      status: "True"
      type: ContainersReady
    - lastProbeTime: null
      lastTransitionTime: "2020-08-27T13:26:30Z"
      status: "True"
      type: PodScheduled
    containerStatuses:
    - containerID: docker://84997c12877f9f0c6345898322de7574cdb1724e2b5cef44083e7c91d77fb204
      image: oussemabhouri/gateway:latest
      imageID: docker-pullable://oussemabhouri/gateway@sha256:a6f3a95fef13ef63f906d1b6a37771cf97441a1737347ea5221ca66ce06fa2e3
      lastState:
        terminated:
          containerID: docker://2fc7735f52eed6ae0783c3075ce8e96f6f5984aa5729ab0e61b42377663f5288
          exitCode: 1
          finishedAt: "2020-08-27T22:54:21Z"
          reason: Error
          startedAt: "2020-08-27T22:50:57Z"
      name: gateway-pod
      ready: true
      restartCount: 70
      started: true
      state:
        running:
          startedAt: "2020-08-27T22:59:25Z"
    hostIP: 10.66.12.204
    phase: Running
    podIP: 192.168.180.194
    podIPs:
    - ip: 192.168.180.194
    qosClass: BestEffort
    startTime: "2020-08-27T13:26:21Z"
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
And here is the pod description:
[root@master-node ~]# kubectl describe pod gateway-pod
Name:         gateway-pod
Namespace:    default
Priority:     0
Node:         worker-node1/10.66.12.204
Start Time:   Thu, 27 Aug 2020 09:26:21 -0400
Labels:       run=gateway-pod
Annotations:  cni.projectcalico.org/podIP: 192.168.180.194/32
              cni.projectcalico.org/podIPs: 192.168.180.194/32
Status:       Running
IP:           192.168.180.194
IPs:
  IP:  192.168.180.194
Containers:
  gateway-pod:
    Container ID:   docker://c70e09b5206f8affcbf18dd55fec47eedda6079052d1fdf5dbfa16ddfe2633fc
    Image:          docker.io/oussemabhouri/gateway
    Image ID:       docker-pullable://oussemabhouri/gateway@sha256:a6f3a95fef13ef63f906d1b6a37771cf97441a1737347ea5221ca66ce06fa2e3
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Fri, 28 Aug 2020 06:05:50 -0400
    Last State:     Terminated
      Reason:       Error
      Exit Code:    1
      Started:      Fri, 28 Aug 2020 05:57:18 -0400
      Finished:     Fri, 28 Aug 2020 06:00:41 -0400
    Ready:          True
    Restart Count:  148
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-zzxng (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  default-token-zzxng:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-zzxng
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason   Age                     From                   Message
  ----     ------   ----                    ----                   -------
  Warning  BackOff  6m15s (x3352 over 20h)  kubelet, worker-node1  Back-off restarting failed container
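Since kubectl logs fails with the "no route to host" error, the only other way I can think of to find out why the container exits with code 1 is to read its logs directly from the container runtime on worker-node1 (the container IDs above are docker:// IDs, so the runtime is Docker). A rough sketch of what I plan to try:

# on worker-node1: find the pod's container and read its logs straight from Docker
docker ps -a | grep gateway
docker logs <container-id>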