-
Notifications
You must be signed in to change notification settings - Fork 7
Open
Description
root@isv-vteves-169-10-105-16-5platform9:/tmp# /opt/bin/kubectl describe pods kube-dns-86f4d74b45-fjpd4 --namespace=kube-system
Name: kube-dns-86f4d74b45-fjpd4
Namespace: kube-system
Node: 10.105.16.5/10.105.16.5
Start Time: Thu, 29 Nov 2018 22:46:36 +0000
Labels: k8s-app=kube-dns
pod-template-hash=4290830601
Annotations: <none>
Status: Running
IP: 10.99.0.2
Controlled By: ReplicaSet/kube-dns-86f4d74b45
Containers:
kubedns:
Container ID: docker://6b1ac528519e60add23bfa9caac676880b2060a309ff0c34971ae751ae15edfb
Image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
Image ID: docker://sha256:80cc5ea4b547abe174d7550b82825ace40769e977cde90495df3427b3a0f4e75
Ports: 10053/UDP, 10053/TCP, 10055/TCP
Host Ports: 0/UDP, 0/TCP, 0/TCP
Args:
--domain=cluster.local.
--dns-port=10053
--config-dir=/kube-dns-config
--v=2
State: Running
Started: Fri, 30 Nov 2018 00:21:08 +0000
Last State: Terminated
Reason: Error
Exit Code: 137
Started: Fri, 30 Nov 2018 00:18:57 +0000
Finished: Fri, 30 Nov 2018 00:21:07 +0000
Ready: True
Restart Count: 6
Limits:
memory: 170Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:10054/healthcheck/kubedns delay=60s timeout=5s period=10s #success=1 #failure=5
Readiness: http-get http://:8081/readiness delay=3s timeout=5s period=10s #success=1 #failure=3
Environment:
PROMETHEUS_PORT: 10055
Mounts:
/kube-dns-config from kube-dns-config (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-pmvds (ro)
dnsmasq:
Container ID: docker://36250a7b298ea93764a5aa56381667994fb61d9a2d73b7092144994194c31574
Image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
Image ID: docker://sha256:c2ce1ffb51ed60c54057f53b8756231f5b4b792ce04113c6755339a1beb25943
Ports: 53/UDP, 53/TCP
Host Ports: 0/UDP, 0/TCP
Args:
-v=2
-logtostderr
-configDir=/etc/k8s/dns/dnsmasq-nanny
-restartDnsmasq=true
--
-k
--cache-size=1000
--no-negcache
--log-facility=-
--server=/cluster.local/127.0.0.1#10053
--server=/in-addr.arpa/127.0.0.1#10053
--server=/ip6.arpa/127.0.0.1#10053
State: Running
Started: Fri, 30 Nov 2018 00:24:41 +0000
Last State: Terminated
Reason: Error
Exit Code: 137
Started: Fri, 30 Nov 2018 00:17:40 +0000
Finished: Fri, 30 Nov 2018 00:19:41 +0000
Ready: True
Restart Count: 14
Requests:
cpu: 150m
memory: 20Mi
Liveness: http-get http://:10054/healthcheck/dnsmasq delay=60s timeout=5s period=10s #success=1 #failure=5
Environment: <none>
Mounts:
/etc/k8s/dns/dnsmasq-nanny from kube-dns-config (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-pmvds (ro)
sidecar:
Container ID: docker://6e55d4b8703d5db1a3637a131664f772b9bcf612c60726df76228af341b0c25d
Image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
Image ID: docker://sha256:6f7f2dc7fab5d7e7f99dc4ac176683a981a9ff911d643b9f29ffa146838deda3
Port: 10054/TCP
Host Port: 0/TCP
Args:
--v=2
--logtostderr
--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
State: Running
Started: Fri, 30 Nov 2018 00:20:36 +0000
Last State: Terminated
Reason: Error
Exit Code: 2
Started: Fri, 30 Nov 2018 00:14:18 +0000
Finished: Fri, 30 Nov 2018 00:17:43 +0000
Ready: True
Restart Count: 11
Requests:
cpu: 10m
memory: 20Mi
Liveness: http-get http://:10054/metrics delay=60s timeout=5s period=10s #success=1 #failure=5
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-dns-token-pmvds (ro)
Conditions:
Type Status
Initialized True
Ready True
PodScheduled True
Volumes:
kube-dns-config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: kube-dns
Optional: true
kube-dns-token-pmvds:
Type: Secret (a volume populated by a Secret)
SecretName: kube-dns-token-pmvds
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: CriticalAddonsOnly
node-role.kubernetes.io/master:NoSchedule
node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Pulled 29m (x9 over 1h) kubelet, 10.105.16.5 Container image "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8" already present on machine
Normal Created 29m (x9 over 1h) kubelet, 10.105.16.5 Created container
Normal Killing 29m (x8 over 1h) kubelet, 10.105.16.5 Killing container with id docker://dnsmasq:Container failed liveness probe.. Container will be killed and recreated.
Normal Started 29m (x9 over 1h) kubelet, 10.105.16.5 Started container
Normal Pulled 29m (x7 over 1h) kubelet, 10.105.16.5 Container image "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8" already present on machine
Normal Created 29m (x7 over 1h) kubelet, 10.105.16.5 Created container
Warning Unhealthy 21m (x40 over 1h) kubelet, 10.105.16.5 Liveness probe failed: HTTP probe failed with statuscode: 503
Warning BackOff 11m (x40 over 1h) kubelet, 10.105.16.5 Back-off restarting failed container
The dnsmasq container above has the following liveness probe:
Liveness: http-get http://:10054/healthcheck/dnsmasq delay=60s timeout=5s period=10s #success=1 #failure=5
Metadata
Assignees
Labels
No labels