kubectl get pod shows READY 0/1 status

I am following a Kubernetes and MongoDB lab, but all of the pods stay in the 0/1 state. What does this mean, and how do I get them to 1/1?

[root@master-node ~]# kubectl get pod
NAME                                 READY   STATUS    RESTARTS   AGE
mongo-express-78fcf796b8-wzgvx       0/1     Pending   0          3m41s
mongodb-deployment-8f6675bc5-qxj4g   0/1     Pending   0          160m
nginx-deployment-64bd7b69c-wp79g     0/1     Pending   0          4h44m

kubectl get pod nginx-deployment-64bd7b69c-wp79g -o yaml

[root@master-node ~]# kubectl get pod nginx-deployment-64bd7b69c-wp79g -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2021-07-27T17:35:57Z"
  generateName: nginx-deployment-64bd7b69c-
  labels:
    app: nginx
    pod-template-hash: 64bd7b69c
  name: nginx-deployment-64bd7b69c-wp79g
  namespace: default
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: nginx-deployment-64bd7b69c
    uid: 5b1250dd-a209-44be-9efb-7cf5a63a02a3
  resourceVersion: "15912"
  uid: d71047b4-d0e6-4d25-bb28-c410639a82ad
spec:
  containers:
  - image: nginx:1.14.2
    imagePullPolicy: IfNotPresent
    name: nginx
    ports:
    - containerPort: 8080
      protocol: TCP
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-2zr6k
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: kube-api-access-2zr6k
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-07-27T17:35:57Z"
    message: '0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master:
      }, that the pod didn''t tolerate.'
    reason: Unschedulable
    status: "False"
    type: PodScheduled
  phase: Pending
  qosClass: BestEffort

kubectl describe pod nginx-deployment-64bd7b69c-wp79g

[root@master-node ~]# kubectl describe pod nginx-deployment-64bd7b69c-wp79g
Name:           nginx-deployment-64bd7b69c-wp79g
Namespace:      default
Priority:       0
Node:           <none>
Labels:         app=nginx
                pod-template-hash=64bd7b69c
Annotations:    <none>
Status:         Pending
IP:
IPs:            <none>
Controlled By:  ReplicaSet/nginx-deployment-64bd7b69c
Containers:
  nginx:
    Image:        nginx:1.14.2
    Port:         8080/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2zr6k (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  kube-api-access-2zr6k:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age                   From               Message
  ----     ------            ----                  ----               -------
  Warning  FailedScheduling  2m53s (x485 over 8h)  default-scheduler  0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.

Answer 1

It looks like your K8s cluster has only one server. In a typical K8s cluster, the master node (control plane) is kept separate from the servers that run the workloads. For this purpose there are "taints", which are essentially attributes that repel pods: with the taint in place, the master will not schedule any pods.

You can see this information in the status.conditions.message element of the kubectl get pod -o yaml output:

message: '0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master:}, that the pod didn't tolerate.'
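
To confirm which taint the node actually carries, you can inspect it directly. A quick check, assuming the node is named master-node as the shell prompt suggests (verify the exact name with kubectl get nodes):

kubectl get nodes
kubectl describe node master-node | grep Taints

The Taints line should list the node-role.kubernetes.io/master taint (typically with the NoSchedule effect), matching the key shown in the scheduler message above.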

A pod can define a toleration so that it can be scheduled onto a node that carries this taint. The mechanism is explained in detail in the documentation: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/

The toleration configuration should look something like this (untested):

tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Exists"
  effect: "NoSchedule"

In your case, it may be simpler than the toleration above to work around this problem by specifying an explicit nodeName: master element in the pod definition. This skips the taint mechanism and allows the pod to be scheduled.
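
A minimal sketch of that approach, again placed in the Deployment's pod template (spec.template.spec). Note that nodeName must match the node name exactly as reported by kubectl get nodes, which in this cluster may be master-node rather than master (an assumption based on the shell prompt):

# fragment of the Deployment's pod template (spec.template.spec)
spec:
  nodeName: master-node     # assumed node name; setting nodeName bypasses the scheduler, and with it the taint check
  containers:
  - name: nginx
    image: nginx:1.14.2
    ports:
    - containerPort: 8080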

Another option is to remove the taint from the master node, as described here: https://stackoverflow.com/q/43147941
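
A sketch of that option, assuming the node is named master-node; the trailing "-" after the taint key removes the taint, after which the pending pods should be scheduled without any changes to their manifests:

kubectl taint nodes master-node node-role.kubernetes.io/master-
# or, for every node in the cluster:
kubectl taint nodes --all node-role.kubernetes.io/master-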
