diff --git a/v1.13/scaleway/PRODUCT.yaml b/v1.13/scaleway/PRODUCT.yaml
new file mode 100644
index 0000000000..0f30f47809
--- /dev/null
+++ b/v1.13/scaleway/PRODUCT.yaml
@@ -0,0 +1,8 @@
+vendor: Scaleway
+name: Scaleway Kubernetes Kapsule
+version: v1.13.1
+website_url: http://www.scaleway.com/en/kubernetes/
+documentation_url: https://developers.scaleway.com/en/products/k8s/api/
+product_logo_url: https://www.scaleway.com/assets/images/logos/scaleway_logo_2018.svg
+type: hosted
+description: 'Kubernetes Kapsule provides a simple way to deploy and manage your containerized applications in the cloud. Relax and focus on your software stack while we take care of your clusters.'
diff --git a/v1.13/scaleway/README.md b/v1.13/scaleway/README.md
new file mode 100644
index 0000000000..96fe342d94
--- /dev/null
+++ b/v1.13/scaleway/README.md
@@ -0,0 +1,27 @@
+# Conformance tests for Scaleway Kubernetes Kapsule
+
+## Set up the Scaleway Kubernetes cluster
+
+First, create a cluster as described in the [documentation](https://developers.scaleway.com/en/products/k8s/api/), download its kubeconfig, and configure `kubectl` to use this config file.
+
+## Run the Conformance Tests
+
+1. Download a binary release of [sonobuoy](https://github.com/heptio/sonobuoy/releases), or build it yourself by running:
+```sh
+$ go get -u -v github.com/heptio/sonobuoy
+```
+
+2. Run sonobuoy:
+```sh
+$ sonobuoy run
+```
+
+3. Check the status:
+```sh
+$ sonobuoy status
+```
+
+4. Once the status shows the run as completed, you can download the results archive by running:
+```sh
+$ sonobuoy retrieve
+```
diff --git a/v1.13/scaleway/e2e.log b/v1.13/scaleway/e2e.log
new file mode 100644
index 0000000000..f7043fca5f
--- /dev/null
+++ b/v1.13/scaleway/e2e.log
@@ -0,0 +1,10536 @@
+I0529 18:15:29.769018 19 test_context.go:358] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-329215334
+I0529 18:15:29.769096 19 e2e.go:224] Starting e2e run "bd439626-823d-11e9-bd6e-667e8fbec69d" on Ginkgo node 1
+Running Suite: Kubernetes e2e suite
+===================================
+Random Seed: 1559153729 - Will randomize all specs
+Will run 201 of 1946 specs
+
+May 29 18:15:29.938: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 18:15:29.942: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
+May 29 18:15:29.988: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
+May 29 18:15:30.044: INFO: 11 / 11 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
+May 29 18:15:30.044: INFO: expected 5 pod replicas in namespace 'kube-system', 5 are Running and Ready. 
+May 29 18:15:30.044: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +May 29 18:15:30.059: INFO: 2 / 2 pods ready in namespace 'kube-system' in daemonset 'flannel' (0 seconds elapsed) +May 29 18:15:30.059: INFO: 2 / 2 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +May 29 18:15:30.059: INFO: 2 / 2 pods ready in namespace 'kube-system' in daemonset 'node-problem-detector' (0 seconds elapsed) +May 29 18:15:30.059: INFO: e2e test version: v1.13.0 +May 29 18:15:30.063: INFO: kube-apiserver version: v1.13.1 +SSS +------------------------------ +[sig-storage] Projected secret + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:15:30.063: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +May 29 18:15:30.325: INFO: Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled. +May 29 18:15:30.531: INFO: Found ClusterRoles; assuming RBAC is enabled. +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-4j6sm +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name projected-secret-test-be35456c-823d-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:15:30.746: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-4j6sm" to be "success or failure" +May 29 18:15:30.752: INFO: Pod "pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.000155ms +May 29 18:15:32.759: INFO: Pod "pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012817437s +May 29 18:15:34.766: INFO: Pod "pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.019972664s +STEP: Saw pod success +May 29 18:15:34.766: INFO: Pod "pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:15:34.772: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d container secret-volume-test: +STEP: delete the pod +May 29 18:15:34.830: INFO: Waiting for pod pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d to disappear +May 29 18:15:34.836: INFO: Pod pod-projected-secrets-be365437-823d-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:15:34.836: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-4j6sm" for this suite. +May 29 18:15:40.872: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:15:41.121: INFO: namespace: e2e-tests-projected-4j6sm, resource: bindings, ignored listing per whitelist +May 29 18:15:41.150: INFO: namespace e2e-tests-projected-4j6sm deletion completed in 6.30798819s + +• [SLOW TEST:11.087 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:15:41.152: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename daemonsets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-8qkvd +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. 
+May 29 18:15:41.516: INFO: Number of nodes with available pods: 0 +May 29 18:15:41.516: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:42.531: INFO: Number of nodes with available pods: 0 +May 29 18:15:42.531: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:43.531: INFO: Number of nodes with available pods: 0 +May 29 18:15:43.531: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:44.530: INFO: Number of nodes with available pods: 1 +May 29 18:15:44.531: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:45.532: INFO: Number of nodes with available pods: 1 +May 29 18:15:45.532: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:46.531: INFO: Number of nodes with available pods: 1 +May 29 18:15:46.531: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:47.532: INFO: Number of nodes with available pods: 1 +May 29 18:15:47.532: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:48.531: INFO: Number of nodes with available pods: 1 +May 29 18:15:48.531: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:49.533: INFO: Number of nodes with available pods: 1 +May 29 18:15:49.533: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod +May 29 18:15:50.627: INFO: Number of nodes with available pods: 2 +May 29 18:15:50.627: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. +May 29 18:15:50.664: INFO: Number of nodes with available pods: 1 +May 29 18:15:50.664: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod +May 29 18:15:51.685: INFO: Number of nodes with available pods: 1 +May 29 18:15:51.685: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod +May 29 18:15:52.677: INFO: Number of nodes with available pods: 1 +May 29 18:15:52.677: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod +May 29 18:15:53.680: INFO: Number of nodes with available pods: 2 +May 29 18:15:53.680: INFO: Number of running nodes: 2, number of available pods: 2 +STEP: Wait for the failed daemon pod to be completely deleted. 
+[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-8qkvd, will wait for the garbage collector to delete the pods +May 29 18:15:53.758: INFO: Deleting DaemonSet.extensions daemon-set took: 11.187539ms +May 29 18:15:53.859: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.383417ms +May 29 18:16:33.926: INFO: Number of nodes with available pods: 0 +May 29 18:16:33.926: INFO: Number of running nodes: 0, number of available pods: 0 +May 29 18:16:34.023: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-8qkvd/daemonsets","resourceVersion":"948687722"},"items":null} + +May 29 18:16:34.031: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-8qkvd/pods","resourceVersion":"948687728"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:16:34.049: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-8qkvd" for this suite. +May 29 18:16:40.142: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:16:40.360: INFO: namespace: e2e-tests-daemonsets-8qkvd, resource: bindings, ignored listing per whitelist +May 29 18:16:40.368: INFO: namespace e2e-tests-daemonsets-8qkvd deletion completed in 6.248037553s + +• [SLOW TEST:59.216 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-storage] Secrets + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:16:40.368: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-q26gx +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name s-test-opt-del-e7dff282-823d-11e9-bd6e-667e8fbec69d +STEP: Creating secret with name s-test-opt-upd-e7dff2f1-823d-11e9-bd6e-667e8fbec69d +STEP: Creating the pod +STEP: Deleting secret 
s-test-opt-del-e7dff282-823d-11e9-bd6e-667e8fbec69d +STEP: Updating secret s-test-opt-upd-e7dff2f1-823d-11e9-bd6e-667e8fbec69d +STEP: Creating secret with name s-test-opt-create-e7dff326-823d-11e9-bd6e-667e8fbec69d +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:17:53.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-q26gx" for this suite. +May 29 18:18:15.687: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:18:15.983: INFO: namespace: e2e-tests-secrets-q26gx, resource: bindings, ignored listing per whitelist +May 29 18:18:15.988: INFO: namespace e2e-tests-secrets-q26gx deletion completed in 22.325524034s + +• [SLOW TEST:95.620 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:18:15.989: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-psdxv +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-exec in namespace e2e-tests-container-probe-psdxv +May 29 18:18:22.288: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-psdxv +STEP: checking the pod's current state and verifying that restartCount is present +May 29 18:18:22.295: INFO: Initial restart count of pod liveness-exec is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:22:23.720: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-psdxv" for this suite. 
+May 29 18:22:29.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:22:29.969: INFO: namespace: e2e-tests-container-probe-psdxv, resource: bindings, ignored listing per whitelist +May 29 18:22:30.072: INFO: namespace e2e-tests-container-probe-psdxv deletion completed in 6.345195551s + +• [SLOW TEST:254.083 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-network] Services + should serve a basic endpoint from pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Services + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:22:30.072: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename services +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-45c4z +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85 +[It] should serve a basic endpoint from pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating service endpoint-test2 in namespace e2e-tests-services-45c4z +STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-45c4z to expose endpoints map[] +May 29 18:22:30.389: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-45c4z exposes endpoints map[] (6.038916ms elapsed) +STEP: Creating pod pod1 in namespace e2e-tests-services-45c4z +STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-45c4z to expose endpoints map[pod1:[80]] +May 29 18:22:33.457: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-45c4z exposes endpoints map[pod1:[80]] (3.055920961s elapsed) +STEP: Creating pod pod2 in namespace e2e-tests-services-45c4z +STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-45c4z to expose endpoints map[pod1:[80] pod2:[80]] +May 29 18:22:36.544: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-45c4z exposes endpoints map[pod1:[80] pod2:[80]] (3.079575134s elapsed) +STEP: Deleting pod pod1 in namespace e2e-tests-services-45c4z +STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-45c4z to expose endpoints map[pod2:[80]] +May 29 18:22:37.583: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-45c4z exposes endpoints map[pod2:[80]] (1.026000864s elapsed) +STEP: Deleting pod pod2 in 
namespace e2e-tests-services-45c4z +STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-45c4z to expose endpoints map[] +May 29 18:22:37.598: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-45c4z exposes endpoints map[] (5.529389ms elapsed) +[AfterEach] [sig-network] Services + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:22:37.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-services-45c4z" for this suite. +May 29 18:22:59.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:22:59.790: INFO: namespace: e2e-tests-services-45c4z, resource: bindings, ignored listing per whitelist +May 29 18:22:59.878: INFO: namespace e2e-tests-services-45c4z deletion completed in 22.243913365s +[AfterEach] [sig-network] Services + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90 + +• [SLOW TEST:29.806 seconds] +[sig-network] Services +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should serve a basic endpoint from pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:22:59.879: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-n6b5j +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secret-namespace-xdtxc +STEP: Creating secret with name secret-test-ca1b5ce6-823e-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:23:00.356: INFO: Waiting up to 5m0s for pod "pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-n6b5j" to be "success or failure" +May 29 18:23:00.363: INFO: Pod "pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.401508ms +May 29 18:23:02.381: INFO: Pod "pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.02503023s +May 29 18:23:04.396: INFO: Pod "pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039952896s +STEP: Saw pod success +May 29 18:23:04.396: INFO: Pod "pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:23:04.402: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d container secret-volume-test: +STEP: delete the pod +May 29 18:23:04.437: INFO: Waiting for pod pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d to disappear +May 29 18:23:04.444: INFO: Pod pod-secrets-ca335726-823e-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:23:04.444: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-n6b5j" for this suite. +May 29 18:23:10.480: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:23:10.557: INFO: namespace: e2e-tests-secrets-n6b5j, resource: bindings, ignored listing per whitelist +May 29 18:23:10.752: INFO: namespace e2e-tests-secrets-n6b5j deletion completed in 6.298800278s +STEP: Destroying namespace "e2e-tests-secret-namespace-xdtxc" for this suite. +May 29 18:23:16.781: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:23:16.844: INFO: namespace: e2e-tests-secret-namespace-xdtxc, resource: bindings, ignored listing per whitelist +May 29 18:23:17.000: INFO: namespace e2e-tests-secret-namespace-xdtxc deletion completed in 6.247942282s + +• [SLOW TEST:17.121 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:23:17.000: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubelet-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-thp9v +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[It] should write entries to /etc/hosts [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:23:21.322: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-thp9v" for this suite. +May 29 18:24:05.351: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:24:05.392: INFO: namespace: e2e-tests-kubelet-test-thp9v, resource: bindings, ignored listing per whitelist +May 29 18:24:05.573: INFO: namespace e2e-tests-kubelet-test-thp9v deletion completed in 44.244483628s + +• [SLOW TEST:48.573 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox Pod with hostAliases + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136 + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not conflict [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:24:05.574: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir-wrapper +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-wrapper-gzxkz +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not conflict [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Cleaning up the secret +STEP: Cleaning up the configmap +STEP: Cleaning up the pod +[AfterEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:24:09.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-wrapper-gzxkz" for this suite. 
+May 29 18:24:15.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:24:16.066: INFO: namespace: e2e-tests-emptydir-wrapper-gzxkz, resource: bindings, ignored listing per whitelist +May 29 18:24:16.240: INFO: namespace e2e-tests-emptydir-wrapper-gzxkz deletion completed in 6.309287793s + +• [SLOW TEST:10.666 seconds] +[sig-storage] EmptyDir wrapper volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + should not conflict [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:24:16.240: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-6pqjh +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-6pqjh +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a new StaefulSet +May 29 18:24:16.543: INFO: Found 0 stateful pods, waiting for 3 +May 29 18:24:26.561: INFO: Found 2 stateful pods, waiting for 3 +May 29 18:24:36.551: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:24:36.551: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:24:36.551: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine +May 29 18:24:36.627: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas +STEP: Performing a canary update +May 29 18:24:46.689: INFO: Updating stateful set ss2 +May 29 18:24:46.704: INFO: Waiting for Pod e2e-tests-statefulset-6pqjh/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666 +STEP: Restoring Pods to the correct revision when they are deleted +May 29 
18:24:56.766: INFO: Found 2 stateful pods, waiting for 3 +May 29 18:25:06.786: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:25:06.786: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:25:06.786: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +May 29 18:25:06.821: INFO: Updating stateful set ss2 +May 29 18:25:06.834: INFO: Waiting for Pod e2e-tests-statefulset-6pqjh/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +May 29 18:25:16.859: INFO: Waiting for Pod e2e-tests-statefulset-6pqjh/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +May 29 18:25:26.891: INFO: Updating stateful set ss2 +May 29 18:25:26.916: INFO: Waiting for StatefulSet e2e-tests-statefulset-6pqjh/ss2 to complete update +May 29 18:25:26.916: INFO: Waiting for Pod e2e-tests-statefulset-6pqjh/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666 +May 29 18:25:36.941: INFO: Waiting for StatefulSet e2e-tests-statefulset-6pqjh/ss2 to complete update +May 29 18:25:36.942: INFO: Waiting for Pod e2e-tests-statefulset-6pqjh/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666 +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 29 18:25:46.943: INFO: Deleting all statefulset in ns e2e-tests-statefulset-6pqjh +May 29 18:25:46.950: INFO: Scaling statefulset ss2 to 0 +May 29 18:26:06.978: INFO: Waiting for statefulset status.replicas updated to 0 +May 29 18:26:06.984: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:26:07.027: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-6pqjh" for this suite. 
+May 29 18:26:13.056: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:26:13.130: INFO: namespace: e2e-tests-statefulset-6pqjh, resource: bindings, ignored listing per whitelist +May 29 18:26:13.319: INFO: namespace e2e-tests-statefulset-6pqjh deletion completed in 6.284325184s + +• [SLOW TEST:117.079 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:26:13.320: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-hwvcl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:26:13.636: INFO: Waiting up to 5m0s for pod "downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-hwvcl" to be "success or failure" +May 29 18:26:13.646: INFO: Pod "downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 9.930718ms +May 29 18:26:15.655: INFO: Pod "downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019151356s +May 29 18:26:17.671: INFO: Pod "downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.034986871s +STEP: Saw pod success +May 29 18:26:17.671: INFO: Pod "downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:26:17.677: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:26:17.703: INFO: Waiting for pod downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d to disappear +May 29 18:26:17.709: INFO: Pod downwardapi-volume-3d675fe3-823f-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:26:17.709: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-hwvcl" for this suite. +May 29 18:26:23.736: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:26:23.980: INFO: namespace: e2e-tests-downward-api-hwvcl, resource: bindings, ignored listing per whitelist +May 29 18:26:24.008: INFO: namespace e2e-tests-downward-api-hwvcl deletion completed in 6.29169676s + +• [SLOW TEST:10.688 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:26:24.008: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-4bqnb +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on node default medium +May 29 18:26:24.283: INFO: Waiting up to 5m0s for pod "pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-4bqnb" to be "success or failure" +May 29 18:26:24.289: INFO: Pod "pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.757065ms +May 29 18:26:26.297: INFO: Pod "pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013480357s +May 29 18:26:28.313: INFO: Pod "pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.029137963s +STEP: Saw pod success +May 29 18:26:28.313: INFO: Pod "pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:26:28.319: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d container test-container: +STEP: delete the pod +May 29 18:26:28.347: INFO: Waiting for pod pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d to disappear +May 29 18:26:28.354: INFO: Pod pod-43bfbee6-823f-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:26:28.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-4bqnb" for this suite. +May 29 18:26:34.422: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:26:35.124: INFO: namespace: e2e-tests-emptydir-4bqnb, resource: bindings, ignored listing per whitelist +May 29 18:26:35.354: INFO: namespace e2e-tests-emptydir-4bqnb deletion completed in 6.993711259s + +• [SLOW TEST:11.346 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:26:35.354: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-p49d8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-4a9883aa-823f-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:26:35.775: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-p49d8" to be "success or failure" +May 29 18:26:35.782: INFO: Pod "pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.573088ms +May 29 18:26:37.790: INFO: Pod "pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.01535809s +May 29 18:26:39.805: INFO: Pod "pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030126572s +STEP: Saw pod success +May 29 18:26:39.805: INFO: Pod "pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:26:39.810: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: +STEP: delete the pod +May 29 18:26:39.838: INFO: Waiting for pod pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d to disappear +May 29 18:26:39.844: INFO: Pod pod-projected-configmaps-4a999e3b-823f-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:26:39.844: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-p49d8" for this suite. +May 29 18:26:45.872: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:26:45.988: INFO: namespace: e2e-tests-projected-p49d8, resource: bindings, ignored listing per whitelist +May 29 18:26:46.135: INFO: namespace e2e-tests-projected-p49d8 deletion completed in 6.284659343s + +• [SLOW TEST:10.781 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:26:46.136: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-8tbb4 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name projected-secret-test-50ef513f-823f-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:26:46.411: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-8tbb4" to be "success or failure" +May 29 18:26:46.418: INFO: Pod "pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", 
readiness=false. Elapsed: 6.098107ms +May 29 18:26:48.427: INFO: Pod "pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015156769s +May 29 18:26:50.440: INFO: Pod "pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028461808s +STEP: Saw pod success +May 29 18:26:50.440: INFO: Pod "pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:26:50.446: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d container projected-secret-volume-test: +STEP: delete the pod +May 29 18:26:50.472: INFO: Waiting for pod pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d to disappear +May 29 18:26:50.477: INFO: Pod pod-projected-secrets-50f0552e-823f-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:26:50.477: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-8tbb4" for this suite. +May 29 18:26:56.506: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:26:56.594: INFO: namespace: e2e-tests-projected-8tbb4, resource: bindings, ignored listing per whitelist +May 29 18:26:56.789: INFO: namespace e2e-tests-projected-8tbb4 deletion completed in 6.305521317s + +• [SLOW TEST:10.653 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl expose + should create services for rc [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:26:56.789: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-6tqwg +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create services for rc [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating Redis RC +May 29 18:26:57.049: INFO: namespace e2e-tests-kubectl-6tqwg +May 29 18:26:57.050: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - 
--namespace=e2e-tests-kubectl-6tqwg' +May 29 18:26:57.785: INFO: stderr: "" +May 29 18:26:57.786: INFO: stdout: "replicationcontroller/redis-master created\n" +STEP: Waiting for Redis master to start. +May 29 18:26:58.792: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:26:58.792: INFO: Found 0 / 1 +May 29 18:26:59.822: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:26:59.822: INFO: Found 0 / 1 +May 29 18:27:00.800: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:27:00.800: INFO: Found 1 / 1 +May 29 18:27:00.800: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +May 29 18:27:00.807: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:27:00.807: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +May 29 18:27:00.807: INFO: wait on redis-master startup in e2e-tests-kubectl-6tqwg +May 29 18:27:00.807: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 logs redis-master-8978z redis-master --namespace=e2e-tests-kubectl-6tqwg' +May 29 18:27:00.954: INFO: stderr: "" +May 29 18:27:00.954: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 29 May 18:27:00.421 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 29 May 18:27:00.421 # Server started, Redis version 3.2.12\n1:M 29 May 18:27:00.421 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 29 May 18:27:00.421 * The server is now ready to accept connections on port 6379\n" +STEP: exposing RC +May 29 18:27:00.954: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=e2e-tests-kubectl-6tqwg' +May 29 18:27:01.110: INFO: stderr: "" +May 29 18:27:01.110: INFO: stdout: "service/rm2 exposed\n" +May 29 18:27:01.116: INFO: Service rm2 in namespace e2e-tests-kubectl-6tqwg found. +STEP: exposing service +May 29 18:27:03.129: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=e2e-tests-kubectl-6tqwg' +May 29 18:27:03.268: INFO: stderr: "" +May 29 18:27:03.268: INFO: stdout: "service/rm3 exposed\n" +May 29 18:27:03.275: INFO: Service rm3 in namespace e2e-tests-kubectl-6tqwg found. +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:27:05.288: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-6tqwg" for this suite. 
+May 29 18:27:27.315: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:27:27.344: INFO: namespace: e2e-tests-kubectl-6tqwg, resource: bindings, ignored listing per whitelist +May 29 18:27:27.606: INFO: namespace e2e-tests-kubectl-6tqwg deletion completed in 22.311464731s + +• [SLOW TEST:30.817 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl expose + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create services for rc [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:27:27.608: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-6d592 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-map-69ab9318-823f-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:27:27.914: INFO: Waiting up to 5m0s for pod "pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-6d592" to be "success or failure" +May 29 18:27:27.921: INFO: Pod "pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.482378ms +May 29 18:27:29.928: INFO: Pod "pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014305336s +May 29 18:27:31.936: INFO: Pod "pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021916698s +STEP: Saw pod success +May 29 18:27:31.936: INFO: Pod "pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:27:31.943: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d container configmap-volume-test: +STEP: delete the pod +May 29 18:27:31.973: INFO: Waiting for pod pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d to disappear +May 29 18:27:31.979: INFO: Pod pod-configmaps-69ad2581-823f-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:27:31.979: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-6d592" for this suite. +May 29 18:27:38.012: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:27:38.040: INFO: namespace: e2e-tests-configmap-6d592, resource: bindings, ignored listing per whitelist +May 29 18:27:38.314: INFO: namespace e2e-tests-configmap-6d592 deletion completed in 6.327003281s + +• [SLOW TEST:10.706 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:27:38.314: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename subpath +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-q9v5v +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-configmap-ls66 +STEP: Creating a pod to test atomic-volume-subpath +May 29 18:27:38.617: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-ls66" in namespace "e2e-tests-subpath-q9v5v" to be "success or failure" +May 29 18:27:38.623: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Pending", Reason="", readiness=false. 
Elapsed: 6.194968ms +May 29 18:27:40.635: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017809118s +May 29 18:27:42.642: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Pending", Reason="", readiness=false. Elapsed: 4.024733986s +May 29 18:27:44.665: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Pending", Reason="", readiness=false. Elapsed: 6.047862247s +May 29 18:27:46.673: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 8.056166335s +May 29 18:27:48.681: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 10.063813551s +May 29 18:27:50.689: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 12.071615294s +May 29 18:27:52.696: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 14.07857105s +May 29 18:27:54.713: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 16.095720535s +May 29 18:27:56.722: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 18.104665797s +May 29 18:27:58.730: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 20.112412065s +May 29 18:28:00.737: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 22.119779553s +May 29 18:28:02.745: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 24.127737098s +May 29 18:28:04.760: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Running", Reason="", readiness=false. Elapsed: 26.142888868s +May 29 18:28:06.767: INFO: Pod "pod-subpath-test-configmap-ls66": Phase="Succeeded", Reason="", readiness=false. Elapsed: 28.150264739s +STEP: Saw pod success +May 29 18:28:06.768: INFO: Pod "pod-subpath-test-configmap-ls66" satisfied condition "success or failure" +May 29 18:28:06.776: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod pod-subpath-test-configmap-ls66 container test-container-subpath-configmap-ls66: +STEP: delete the pod +May 29 18:28:06.846: INFO: Waiting for pod pod-subpath-test-configmap-ls66 to disappear +May 29 18:28:06.858: INFO: Pod pod-subpath-test-configmap-ls66 no longer exists +STEP: Deleting pod pod-subpath-test-configmap-ls66 +May 29 18:28:06.858: INFO: Deleting pod "pod-subpath-test-configmap-ls66" in namespace "e2e-tests-subpath-q9v5v" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:28:06.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-q9v5v" for this suite. 
+May 29 18:28:12.893: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:28:13.054: INFO: namespace: e2e-tests-subpath-q9v5v, resource: bindings, ignored listing per whitelist +May 29 18:28:13.208: INFO: namespace e2e-tests-subpath-q9v5v deletion completed in 6.336223954s + +• [SLOW TEST:34.894 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod with mountPath of existing file [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] EmptyDir wrapper volumes + should not cause race condition when used for configmaps [Serial] [Slow] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:28:13.208: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir-wrapper +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-wrapper-kxlgz +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not cause race condition when used for configmaps [Serial] [Slow] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating 50 configmaps +STEP: Creating RC which spawns configmap-volume pods +May 29 18:28:13.846: INFO: Pod name wrapped-volume-race-850c478a-823f-11e9-bd6e-667e8fbec69d: Found 0 pods out of 5 +May 29 18:28:18.867: INFO: Pod name wrapped-volume-race-850c478a-823f-11e9-bd6e-667e8fbec69d: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-850c478a-823f-11e9-bd6e-667e8fbec69d in namespace e2e-tests-emptydir-wrapper-kxlgz, will wait for the garbage collector to delete the pods +May 29 18:28:29.007: INFO: Deleting ReplicationController wrapped-volume-race-850c478a-823f-11e9-bd6e-667e8fbec69d took: 14.620056ms +May 29 18:28:29.107: INFO: Terminating ReplicationController wrapped-volume-race-850c478a-823f-11e9-bd6e-667e8fbec69d pods took: 100.347486ms +STEP: Creating RC which spawns configmap-volume pods +May 29 18:29:07.342: INFO: Pod name wrapped-volume-race-a4ed5d65-823f-11e9-bd6e-667e8fbec69d: Found 0 pods out of 5 +May 29 18:29:12.355: INFO: Pod name wrapped-volume-race-a4ed5d65-823f-11e9-bd6e-667e8fbec69d: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-a4ed5d65-823f-11e9-bd6e-667e8fbec69d in namespace e2e-tests-emptydir-wrapper-kxlgz, will wait for the garbage collector to delete the pods +May 29 18:29:24.466: INFO: Deleting ReplicationController 
wrapped-volume-race-a4ed5d65-823f-11e9-bd6e-667e8fbec69d took: 10.824395ms +May 29 18:29:24.566: INFO: Terminating ReplicationController wrapped-volume-race-a4ed5d65-823f-11e9-bd6e-667e8fbec69d pods took: 100.252878ms +STEP: Creating RC which spawns configmap-volume pods +May 29 18:30:10.203: INFO: Pod name wrapped-volume-race-ca64d2a4-823f-11e9-bd6e-667e8fbec69d: Found 0 pods out of 5 +May 29 18:30:15.216: INFO: Pod name wrapped-volume-race-ca64d2a4-823f-11e9-bd6e-667e8fbec69d: Found 5 pods out of 5 +STEP: Ensuring each pod is running +STEP: deleting ReplicationController wrapped-volume-race-ca64d2a4-823f-11e9-bd6e-667e8fbec69d in namespace e2e-tests-emptydir-wrapper-kxlgz, will wait for the garbage collector to delete the pods +May 29 18:30:25.336: INFO: Deleting ReplicationController wrapped-volume-race-ca64d2a4-823f-11e9-bd6e-667e8fbec69d took: 23.131677ms +May 29 18:30:25.436: INFO: Terminating ReplicationController wrapped-volume-race-ca64d2a4-823f-11e9-bd6e-667e8fbec69d pods took: 100.255011ms +STEP: Cleaning up the configMaps +[AfterEach] [sig-storage] EmptyDir wrapper volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:31:10.697: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-wrapper-kxlgz" for this suite. +May 29 18:31:18.731: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:31:18.872: INFO: namespace: e2e-tests-emptydir-wrapper-kxlgz, resource: bindings, ignored listing per whitelist +May 29 18:31:19.042: INFO: namespace e2e-tests-emptydir-wrapper-kxlgz deletion completed in 8.337656129s + +• [SLOW TEST:185.833 seconds] +[sig-storage] EmptyDir wrapper volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + should not cause race condition when used for configmaps [Serial] [Slow] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[k8s.io] Probing container + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:31:19.042: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-k8hdg +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-exec in namespace e2e-tests-container-probe-k8hdg +May 29 18:31:23.381: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-k8hdg +STEP: checking the pod's current state and verifying that restartCount is present +May 29 18:31:23.390: INFO: Initial restart count of pod liveness-exec is 0 +May 29 18:32:13.980: INFO: Restart count of pod e2e-tests-container-probe-k8hdg/liveness-exec is now 1 (50.58990298s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:32:13.994: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-k8hdg" for this suite. +May 29 18:32:20.021: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:32:20.273: INFO: namespace: e2e-tests-container-probe-k8hdg, resource: bindings, ignored listing per whitelist +May 29 18:32:20.335: INFO: namespace e2e-tests-container-probe-k8hdg deletion completed in 6.333014786s + +• [SLOW TEST:61.293 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:32:20.335: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-xbh55 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:32:20.654: INFO: Waiting up to 5m0s for pod "downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-xbh55" to be "success or failure" +May 29 18:32:20.659: INFO: Pod "downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.443747ms +May 29 18:32:22.674: INFO: Pod "downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020098254s +May 29 18:32:24.680: INFO: Pod "downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026845435s +STEP: Saw pod success +May 29 18:32:24.681: INFO: Pod "downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:32:24.687: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:32:24.723: INFO: Waiting for pod downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:32:24.729: INFO: Pod downwardapi-volume-1829df54-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:32:24.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-xbh55" for this suite. +May 29 18:32:30.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:32:31.046: INFO: namespace: e2e-tests-projected-xbh55, resource: bindings, ignored listing per whitelist +May 29 18:32:31.071: INFO: namespace e2e-tests-projected-xbh55 deletion completed in 6.336376021s + +• [SLOW TEST:10.736 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:32:31.072: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-f7w2z +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:32:31.348: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +May 29 18:32:31.362: INFO: Pod name sample-pod: Found 0 pods out of 1 +May 29 18:32:36.378: 
INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +May 29 18:32:36.378: INFO: Creating deployment "test-rolling-update-deployment" +May 29 18:32:36.386: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +May 29 18:32:36.397: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +May 29 18:32:38.412: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +May 29 18:32:38.419: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694751556, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694751556, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694751556, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694751556, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rolling-update-deployment-68b55d7bc6\" is progressing."}}, CollisionCount:(*int32)(nil)} +May 29 18:32:40.425: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +May 29 18:32:40.447: INFO: Deployment "test-rolling-update-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:e2e-tests-deployment-f7w2z,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f7w2z/deployments/test-rolling-update-deployment,UID:218b9a33-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948762033,Generation:1,CreationTimestamp:2019-05-29 18:32:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-29 18:32:36 +0000 UTC 2019-05-29 18:32:36 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-29 18:32:39 +0000 UTC 2019-05-29 18:32:36 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-68b55d7bc6" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +May 29 18:32:40.454: INFO: New ReplicaSet "test-rolling-update-deployment-68b55d7bc6" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-68b55d7bc6,GenerateName:,Namespace:e2e-tests-deployment-f7w2z,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f7w2z/replicasets/test-rolling-update-deployment-68b55d7bc6,UID:218e5f34-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948762021,Generation:1,CreationTimestamp:2019-05-29 18:32:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 218b9a33-8240-11e9-9b18-c2b4512ea1b9 0xc0017e9da7 0xc0017e9da8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +May 29 18:32:40.454: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +May 29 18:32:40.455: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:e2e-tests-deployment-f7w2z,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-f7w2z/replicasets/test-rolling-update-controller,UID:1e8c02e0-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948762030,Generation:2,CreationTimestamp:2019-05-29 18:32:31 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 218b9a33-8240-11e9-9b18-c2b4512ea1b9 0xc0017e9ce7 0xc0017e9ce8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +May 29 18:32:40.462: INFO: Pod "test-rolling-update-deployment-68b55d7bc6-r9rzn" is 
available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-68b55d7bc6-r9rzn,GenerateName:test-rolling-update-deployment-68b55d7bc6-,Namespace:e2e-tests-deployment-f7w2z,SelfLink:/api/v1/namespaces/e2e-tests-deployment-f7w2z/pods/test-rolling-update-deployment-68b55d7bc6-r9rzn,UID:218f121e-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948762020,Generation:0,CreationTimestamp:2019-05-29 18:32:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-68b55d7bc6 218e5f34-8240-11e9-9b18-c2b4512ea1b9 0xc00170c517 0xc00170c518}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-6ldgq {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-6ldgq,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-6ldgq true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00170c590} {node.kubernetes.io/unreachable Exists NoExecute 0xc00170c600}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 18:32:36 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 18:32:39 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 18:32:39 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 18:32:36 +0000 UTC }],Message:,Reason:,HostIP:10.12.149.215,PodIP:100.64.0.30,StartTime:2019-05-29 18:32:36 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-29 18:32:39 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://ff549e5a3ff00de8daeaf61e538c1c84a6255a9f1afedc0a72b8045098526c03}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:32:40.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-f7w2z" for this suite. 
+May 29 18:32:46.499: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:32:46.529: INFO: namespace: e2e-tests-deployment-f7w2z, resource: bindings, ignored listing per whitelist +May 29 18:32:46.766: INFO: namespace e2e-tests-deployment-f7w2z deletion completed in 6.297552951s + +• [SLOW TEST:15.695 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run job + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:32:46.767: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-2x2z9 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run job + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1454 +[It] should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +May 29 18:32:47.083: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-2x2z9' +May 29 18:32:47.225: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +May 29 18:32:47.225: INFO: stdout: "job.batch/e2e-test-nginx-job created\n" +STEP: verifying the job e2e-test-nginx-job was created +[AfterEach] [k8s.io] Kubectl run job + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1459 +May 29 18:32:47.232: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete jobs e2e-test-nginx-job --namespace=e2e-tests-kubectl-2x2z9' +May 29 18:32:47.371: INFO: stderr: "" +May 29 18:32:47.371: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:32:47.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-2x2z9" for this suite. +May 29 18:33:09.409: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:33:09.595: INFO: namespace: e2e-tests-kubectl-2x2z9, resource: bindings, ignored listing per whitelist +May 29 18:33:09.677: INFO: namespace e2e-tests-kubectl-2x2z9 deletion completed in 22.297783648s + +• [SLOW TEST:22.910 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run job + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run pod + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:33:09.677: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-g8bxx +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1527 +[It] should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +May 29 
18:33:09.968: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-g8bxx' +May 29 18:33:10.094: INFO: stderr: "" +May 29 18:33:10.094: INFO: stdout: "pod/e2e-test-nginx-pod created\n" +STEP: verifying the pod e2e-test-nginx-pod was created +[AfterEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1532 +May 29 18:33:10.100: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-g8bxx' +May 29 18:33:13.715: INFO: stderr: "" +May 29 18:33:13.715: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:33:13.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-g8bxx" for this suite. +May 29 18:33:19.757: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:33:19.927: INFO: namespace: e2e-tests-kubectl-g8bxx, resource: bindings, ignored listing per whitelist +May 29 18:33:20.051: INFO: namespace e2e-tests-kubectl-g8bxx deletion completed in 6.327790428s + +• [SLOW TEST:10.374 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run pod + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Projected downwardAPI + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:33:20.051: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-69pwv +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:33:20.368: INFO: Waiting up to 
5m0s for pod "downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-69pwv" to be "success or failure" +May 29 18:33:20.374: INFO: Pod "downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.638593ms +May 29 18:33:22.382: INFO: Pod "downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014118139s +May 29 18:33:24.389: INFO: Pod "downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021202001s +STEP: Saw pod success +May 29 18:33:24.389: INFO: Pod "downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:33:24.395: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:33:24.425: INFO: Waiting for pod downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:33:24.431: INFO: Pod downwardapi-volume-3bc1abbe-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:33:24.431: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-69pwv" for this suite. +May 29 18:33:30.471: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:33:30.610: INFO: namespace: e2e-tests-projected-69pwv, resource: bindings, ignored listing per whitelist +May 29 18:33:30.800: INFO: namespace e2e-tests-projected-69pwv deletion completed in 6.361989637s + +• [SLOW TEST:10.749 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:33:30.800: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-ck256 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name 
projected-configmap-test-volume-4224e900-8240-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:33:31.092: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-ck256" to be "success or failure" +May 29 18:33:31.100: INFO: Pod "pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.649621ms +May 29 18:33:33.107: INFO: Pod "pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014337709s +May 29 18:33:35.114: INFO: Pod "pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021817626s +STEP: Saw pod success +May 29 18:33:35.114: INFO: Pod "pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:33:35.120: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: +STEP: delete the pod +May 29 18:33:35.150: INFO: Waiting for pod pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:33:35.156: INFO: Pod pod-projected-configmaps-4225f8ff-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:33:35.156: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-ck256" for this suite. +May 29 18:33:41.193: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:33:41.348: INFO: namespace: e2e-tests-projected-ck256, resource: bindings, ignored listing per whitelist +May 29 18:33:41.457: INFO: namespace e2e-tests-projected-ck256 deletion completed in 6.293979566s + +• [SLOW TEST:10.657 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[k8s.io] Probing container + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:33:41.457: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-mfxpc +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:34:05.765: INFO: Container started at 2019-05-29 18:33:43 +0000 UTC, pod became ready at 2019-05-29 18:34:04 +0000 UTC +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:34:05.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-mfxpc" for this suite. +May 29 18:34:27.794: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:34:28.011: INFO: namespace: e2e-tests-container-probe-mfxpc, resource: bindings, ignored listing per whitelist +May 29 18:34:28.074: INFO: namespace e2e-tests-container-probe-mfxpc deletion completed in 22.301597472s + +• [SLOW TEST:46.617 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-apps] ReplicationController + should release no longer matching pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:34:28.074: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename replication-controller +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-djlw5 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should release no longer matching pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Given a ReplicationController is created +STEP: When the matched label of one of its pods change +May 29 18:34:28.350: INFO: Pod name pod-release: Found 0 pods out of 1 +May 29 18:34:33.357: INFO: Pod name pod-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:34:34.386: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replication-controller-djlw5" for this suite. 
+May 29 18:34:40.414: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:34:40.827: INFO: namespace: e2e-tests-replication-controller-djlw5, resource: bindings, ignored listing per whitelist +May 29 18:34:41.033: INFO: namespace e2e-tests-replication-controller-djlw5 deletion completed in 6.638813378s + +• [SLOW TEST:12.960 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should release no longer matching pods [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:34:41.034: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-qblqn +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-map-6c04aacd-8240-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:34:41.347: INFO: Waiting up to 5m0s for pod "pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-qblqn" to be "success or failure" +May 29 18:34:41.353: INFO: Pod "pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.780291ms +May 29 18:34:43.360: INFO: Pod "pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013321762s +May 29 18:34:45.368: INFO: Pod "pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021123502s +STEP: Saw pod success +May 29 18:34:45.368: INFO: Pod "pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:34:45.376: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d container secret-volume-test: +STEP: delete the pod +May 29 18:34:45.404: INFO: Waiting for pod pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:34:45.410: INFO: Pod pod-secrets-6c05b81a-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:34:45.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-qblqn" for this suite. +May 29 18:34:51.444: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:34:51.766: INFO: namespace: e2e-tests-secrets-qblqn, resource: bindings, ignored listing per whitelist +May 29 18:34:51.766: INFO: namespace e2e-tests-secrets-qblqn deletion completed in 6.349323359s + +• [SLOW TEST:10.732 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run deployment + should create a deployment from an image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:34:51.766: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-fhm72 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1399 +[It] should create a deployment from an image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +May 29 18:34:52.026: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=e2e-tests-kubectl-fhm72' +May 29 18:34:52.130: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is 
DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +May 29 18:34:52.130: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n" +STEP: verifying the deployment e2e-test-nginx-deployment was created +STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created +[AfterEach] [k8s.io] Kubectl run deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1404 +May 29 18:34:54.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-fhm72' +May 29 18:34:54.251: INFO: stderr: "" +May 29 18:34:54.251: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:34:54.251: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-fhm72" for this suite. +May 29 18:35:00.276: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:35:00.324: INFO: namespace: e2e-tests-kubectl-fhm72, resource: bindings, ignored listing per whitelist +May 29 18:35:00.536: INFO: namespace e2e-tests-kubectl-fhm72 deletion completed in 6.279073198s + +• [SLOW TEST:8.769 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a deployment from an image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:35:00.536: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-6qnpt +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:35:00.868: INFO: Waiting up to 5m0s for pod "downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-6qnpt" to be "success or failure" +May 29 18:35:00.874: INFO: Pod "downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.794527ms +May 29 18:35:02.882: INFO: Pod "downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013421128s +May 29 18:35:04.889: INFO: Pod "downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021205105s +STEP: Saw pod success +May 29 18:35:04.890: INFO: Pod "downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:35:04.895: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:35:04.926: INFO: Waiting for pod downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:35:04.932: INFO: Pod downwardapi-volume-77a8ad2e-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:35:04.932: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-6qnpt" for this suite. 
+May 29 18:35:10.961: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:35:11.185: INFO: namespace: e2e-tests-projected-6qnpt, resource: bindings, ignored listing per whitelist +May 29 18:35:11.215: INFO: namespace e2e-tests-projected-6qnpt deletion completed in 6.275617505s + +• [SLOW TEST:10.679 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:35:11.216: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-8mvdh +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +May 29 18:35:21.587: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +May 29 18:35:21.594: INFO: Pod pod-with-poststart-http-hook still exists +May 29 18:35:23.595: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +May 29 18:35:23.602: INFO: Pod pod-with-poststart-http-hook still exists +May 29 18:35:25.595: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +May 29 18:35:25.602: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:35:25.603: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-8mvdh" for this suite. 
+May 29 18:35:47.630: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:35:47.905: INFO: namespace: e2e-tests-container-lifecycle-hook-8mvdh, resource: bindings, ignored listing per whitelist +May 29 18:35:47.917: INFO: namespace e2e-tests-container-lifecycle-hook-8mvdh deletion completed in 22.307118509s + +• [SLOW TEST:36.701 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:35:47.919: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-9rqb9 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:35:48.211: INFO: Waiting up to 5m0s for pod "downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-9rqb9" to be "success or failure" +May 29 18:35:48.216: INFO: Pod "downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.730867ms +May 29 18:35:50.225: INFO: Pod "downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014020583s +May 29 18:35:52.239: INFO: Pod "downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.027942s +STEP: Saw pod success +May 29 18:35:52.239: INFO: Pod "downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:35:52.244: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:35:52.271: INFO: Waiting for pod downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:35:52.276: INFO: Pod downwardapi-volume-93e08cb9-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:35:52.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-9rqb9" for this suite. +May 29 18:35:58.311: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:35:58.472: INFO: namespace: e2e-tests-downward-api-9rqb9, resource: bindings, ignored listing per whitelist +May 29 18:35:58.584: INFO: namespace e2e-tests-downward-api-9rqb9 deletion completed in 6.295082283s + +• [SLOW TEST:10.665 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:35:58.584: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-f4g4s +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +May 29 18:35:58.923: INFO: Waiting up to 5m0s for pod "downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-f4g4s" to be "success or failure" +May 29 18:35:58.931: INFO: Pod "downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.123814ms +May 29 18:36:00.939: INFO: Pod "downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.015028287s +May 29 18:36:02.953: INFO: Pod "downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029261469s +STEP: Saw pod success +May 29 18:36:02.953: INFO: Pod "downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:36:02.959: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d container dapi-container: +STEP: delete the pod +May 29 18:36:02.988: INFO: Waiting for pod downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:36:02.993: INFO: Pod downward-api-9a430608-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:36:02.994: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-f4g4s" for this suite. +May 29 18:36:09.021: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:36:09.286: INFO: namespace: e2e-tests-downward-api-f4g4s, resource: bindings, ignored listing per whitelist +May 29 18:36:09.314: INFO: namespace e2e-tests-downward-api-f4g4s deletion completed in 6.314000296s + +• [SLOW TEST:10.731 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:36:09.315: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-rtnjr +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-a09f1ec7-8240-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:36:09.598: INFO: Waiting up to 5m0s for pod "pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-rtnjr" to be "success or failure" +May 29 18:36:09.604: INFO: Pod "pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.532584ms +May 29 18:36:11.611: INFO: Pod "pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0129329s +May 29 18:36:13.626: INFO: Pod "pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027862812s +STEP: Saw pod success +May 29 18:36:13.626: INFO: Pod "pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:36:13.632: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d container configmap-volume-test: +STEP: delete the pod +May 29 18:36:13.660: INFO: Waiting for pod pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:36:13.666: INFO: Pod pod-configmaps-a0a01264-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:36:13.666: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-rtnjr" for this suite. +May 29 18:36:19.705: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:36:19.755: INFO: namespace: e2e-tests-configmap-rtnjr, resource: bindings, ignored listing per whitelist +May 29 18:36:19.952: INFO: namespace e2e-tests-configmap-rtnjr deletion completed in 6.275067772s + +• [SLOW TEST:10.637 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:36:19.952: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-8fdb2 +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir volume type on node default medium +May 29 18:36:20.252: INFO: Waiting up to 5m0s for pod "pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-8fdb2" to be "success or failure" +May 29 18:36:20.258: INFO: Pod "pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.6101ms +May 29 18:36:22.266: INFO: Pod "pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013454199s +May 29 18:36:24.281: INFO: Pod "pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029090698s +STEP: Saw pod success +May 29 18:36:24.282: INFO: Pod "pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:36:24.288: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d container test-container: +STEP: delete the pod +May 29 18:36:24.318: INFO: Waiting for pod pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:36:24.324: INFO: Pod pod-a6f99c82-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:36:24.324: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-8fdb2" for this suite. +May 29 18:36:30.356: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:36:30.475: INFO: namespace: e2e-tests-emptydir-8fdb2, resource: bindings, ignored listing per whitelist +May 29 18:36:30.721: INFO: namespace e2e-tests-emptydir-8fdb2 deletion completed in 6.389068057s + +• [SLOW TEST:10.769 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via environment variable [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:36:30.723: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-rc4cn +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via environment variable [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap e2e-tests-configmap-rc4cn/configmap-test-ad6f8f29-8240-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:36:31.118: INFO: Waiting up to 5m0s for pod "pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-rc4cn" to be "success or failure" +May 29 18:36:31.126: INFO: Pod "pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 8.14918ms +May 29 18:36:33.133: INFO: Pod "pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015637701s +May 29 18:36:35.251: INFO: Pod "pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.133363406s +STEP: Saw pod success +May 29 18:36:35.251: INFO: Pod "pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:36:35.262: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d container env-test: +STEP: delete the pod +May 29 18:36:35.333: INFO: Waiting for pod pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:36:35.339: INFO: Pod pod-configmaps-ad70a42e-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:36:35.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-rc4cn" for this suite. +May 29 18:36:41.368: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:36:41.516: INFO: namespace: e2e-tests-configmap-rc4cn, resource: bindings, ignored listing per whitelist +May 29 18:36:41.706: INFO: namespace e2e-tests-configmap-rc4cn deletion completed in 6.358223457s + +• [SLOW TEST:10.983 seconds] +[sig-node] ConfigMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31 + should be consumable via environment variable [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:36:41.706: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename var-expansion +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-7bgw8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test substitution in container's command +May 29 18:36:42.063: INFO: Waiting up to 5m0s for pod "var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-var-expansion-7bgw8" to be "success or failure" +May 29 18:36:42.070: INFO: Pod "var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.168417ms +May 29 18:36:44.077: INFO: Pod "var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014351013s +May 29 18:36:46.092: INFO: Pod "var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029727023s +STEP: Saw pod success +May 29 18:36:46.093: INFO: Pod "var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:36:46.100: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d container dapi-container: +STEP: delete the pod +May 29 18:36:46.130: INFO: Waiting for pod var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d to disappear +May 29 18:36:46.135: INFO: Pod var-expansion-b3f98f10-8240-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:36:46.135: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-var-expansion-7bgw8" for this suite. +May 29 18:36:52.166: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:36:52.304: INFO: namespace: e2e-tests-var-expansion-7bgw8, resource: bindings, ignored listing per whitelist +May 29 18:36:52.408: INFO: namespace e2e-tests-var-expansion-7bgw8 deletion completed in 6.263788732s + +• [SLOW TEST:10.702 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow substituting values in a container's command [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:36:52.409: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-bcxdj +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a watch on configmaps with label A +STEP: creating a watch on configmaps with label B +STEP: creating a watch on configmaps with label A or B +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification +May 29 18:36:52.715: INFO: Got : ADDED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948781635,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 29 18:36:52.715: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948781635,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: modifying configmap A and ensuring the correct watchers observe the notification +May 29 18:37:02.737: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948782385,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +May 29 18:37:02.737: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948782385,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification +May 29 18:37:12.762: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948783168,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 
2,},BinaryData:map[string][]byte{},} +May 29 18:37:12.762: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948783168,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: deleting configmap A and ensuring the correct watchers observe the notification +May 29 18:37:22.784: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948783921,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +May 29 18:37:22.784: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-a,UID:ba548757-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948783921,Generation:0,CreationTimestamp:2019-05-29 18:36:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification +May 29 18:37:32.804: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-b,UID:d2390a20-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948784684,Generation:0,CreationTimestamp:2019-05-29 18:37:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 29 18:37:32.804: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-b,UID:d2390a20-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948784684,Generation:0,CreationTimestamp:2019-05-29 18:37:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: deleting configmap B and ensuring the correct watchers observe the notification +May 29 18:37:42.826: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-b,UID:d2390a20-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948785427,Generation:0,CreationTimestamp:2019-05-29 18:37:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 29 18:37:42.826: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-bcxdj,SelfLink:/api/v1/namespaces/e2e-tests-watch-bcxdj/configmaps/e2e-watch-test-configmap-b,UID:d2390a20-8240-11e9-9b18-c2b4512ea1b9,ResourceVersion:948785427,Generation:0,CreationTimestamp:2019-05-29 18:37:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:37:52.827: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-bcxdj" for this suite. 
+May 29 18:37:58.867: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:37:59.116: INFO: namespace: e2e-tests-watch-bcxdj, resource: bindings, ignored listing per whitelist +May 29 18:37:59.138: INFO: namespace e2e-tests-watch-bcxdj deletion completed in 6.294565311s + +• [SLOW TEST:66.729 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:37:59.139: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename statefulset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-vlx5d +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-vlx5d +[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Initializing watcher for selector baz=blah,foo=bar +STEP: Creating stateful set ss in namespace e2e-tests-statefulset-vlx5d +STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-vlx5d +May 29 18:37:59.430: INFO: Found 0 stateful pods, waiting for 1 +May 29 18:38:09.446: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod +May 29 18:38:09.453: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 29 18:38:09.694: INFO: stderr: "" +May 29 18:38:09.694: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 29 18:38:09.694: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 29 18:38:09.701: INFO: Waiting for pod ss-0 to enter Running - 
Ready=false, currently Running - Ready=true +May 29 18:38:19.717: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 29 18:38:19.717: INFO: Waiting for statefulset status.replicas updated to 0 +May 29 18:38:19.745: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.99999958s +May 29 18:38:20.754: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.992536352s +May 29 18:38:21.761: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.983093906s +May 29 18:38:22.769: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.975579853s +May 29 18:38:23.778: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.968012651s +May 29 18:38:24.786: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.95891909s +May 29 18:38:25.794: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.950999395s +May 29 18:38:26.803: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.942538013s +May 29 18:38:27.811: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.934260816s +May 29 18:38:28.819: INFO: Verifying statefulset ss doesn't scale past 1 for another 926.368161ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-vlx5d +May 29 18:38:29.835: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 29 18:38:30.055: INFO: stderr: "" +May 29 18:38:30.055: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 29 18:38:30.055: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 29 18:38:30.061: INFO: Found 1 stateful pods, waiting for 3 +May 29 18:38:40.078: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:38:40.078: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +May 29 18:38:40.078: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in order +STEP: Scale down will halt with unhealthy stateful pod +May 29 18:38:40.089: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 29 18:38:40.360: INFO: stderr: "" +May 29 18:38:40.360: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 29 18:38:40.360: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 29 18:38:40.360: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 29 18:38:40.669: INFO: stderr: "" +May 29 18:38:40.669: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 29 18:38:40.669: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 29 18:38:40.669: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-2 
-- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 29 18:38:40.960: INFO: stderr: "" +May 29 18:38:40.960: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 29 18:38:40.960: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 29 18:38:40.960: INFO: Waiting for statefulset status.replicas updated to 0 +May 29 18:38:40.967: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 1 +May 29 18:38:50.989: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 29 18:38:50.989: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +May 29 18:38:50.989: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +May 29 18:38:51.009: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999248s +May 29 18:38:52.018: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.992891379s +May 29 18:38:53.026: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.984376295s +May 29 18:38:54.034: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.975794586s +May 29 18:38:55.041: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.967787708s +May 29 18:38:56.049: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.960317076s +May 29 18:38:57.058: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.952809216s +May 29 18:38:58.066: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.944127389s +May 29 18:38:59.074: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.935785891s +May 29 18:39:00.083: INFO: Verifying statefulset ss doesn't scale past 3 for another 927.741772ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-vlx5d +May 29 18:39:01.099: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 29 18:39:01.346: INFO: stderr: "" +May 29 18:39:01.346: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 29 18:39:01.346: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 29 18:39:01.346: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 29 18:39:01.641: INFO: stderr: "" +May 29 18:39:01.641: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 29 18:39:01.641: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 29 18:39:01.641: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-vlx5d ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 29 18:39:01.902: INFO: stderr: "" +May 29 18:39:01.902: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 29 18:39:01.902: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 29 18:39:01.902: 
INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 29 18:39:31.940: INFO: Deleting all statefulset in ns e2e-tests-statefulset-vlx5d +May 29 18:39:31.946: INFO: Scaling statefulset ss to 0 +May 29 18:39:31.968: INFO: Waiting for statefulset status.replicas updated to 0 +May 29 18:39:31.974: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:39:32.000: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-vlx5d" for this suite. +May 29 18:39:38.034: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:39:38.147: INFO: namespace: e2e-tests-statefulset-vlx5d, resource: bindings, ignored listing per whitelist +May 29 18:39:38.305: INFO: namespace e2e-tests-statefulset-vlx5d deletion completed in 6.297467884s + +• [SLOW TEST:99.167 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[k8s.io] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:39:38.306: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-ks66r +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +May 29 18:39:43.142: INFO: Successfully updated pod "pod-update-activedeadlineseconds-1d328b81-8241-11e9-bd6e-667e8fbec69d" +May 29 18:39:43.142: INFO: Waiting up to 5m0s for pod 
"pod-update-activedeadlineseconds-1d328b81-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-pods-ks66r" to be "terminated due to deadline exceeded" +May 29 18:39:43.148: INFO: Pod "pod-update-activedeadlineseconds-1d328b81-8241-11e9-bd6e-667e8fbec69d": Phase="Running", Reason="", readiness=true. Elapsed: 5.952674ms +May 29 18:39:45.155: INFO: Pod "pod-update-activedeadlineseconds-1d328b81-8241-11e9-bd6e-667e8fbec69d": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 2.0127249s +May 29 18:39:45.155: INFO: Pod "pod-update-activedeadlineseconds-1d328b81-8241-11e9-bd6e-667e8fbec69d" satisfied condition "terminated due to deadline exceeded" +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:39:45.155: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-ks66r" for this suite. +May 29 18:39:51.187: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:39:51.347: INFO: namespace: e2e-tests-pods-ks66r, resource: bindings, ignored listing per whitelist +May 29 18:39:51.416: INFO: namespace e2e-tests-pods-ks66r deletion completed in 6.253211379s + +• [SLOW TEST:13.110 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:39:51.418: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-probe +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-x6ngl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-x6ngl +May 29 18:39:55.711: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-x6ngl +STEP: checking the pod's current state and verifying that restartCount is present +May 29 18:39:55.718: INFO: Initial restart count of pod liveness-http is 0 +May 29 18:40:19.861: INFO: Restart count of pod e2e-tests-container-probe-x6ngl/liveness-http is 
now 1 (24.143239092s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:40:19.876: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-x6ngl" for this suite. +May 29 18:40:25.906: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:40:25.975: INFO: namespace: e2e-tests-container-probe-x6ngl, resource: bindings, ignored listing per whitelist +May 29 18:40:26.203: INFO: namespace e2e-tests-container-probe-x6ngl deletion completed in 6.31923567s + +• [SLOW TEST:34.785 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Runtime + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:40:26.204: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename container-runtime +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-runtime-dvvwz +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [k8s.io] Container Runtime 
+ /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:40:52.944: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-runtime-dvvwz" for this suite. +May 29 18:40:58.976: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:40:59.156: INFO: namespace: e2e-tests-container-runtime-dvvwz, resource: bindings, ignored listing per whitelist +May 29 18:40:59.257: INFO: namespace e2e-tests-container-runtime-dvvwz deletion completed in 6.305371653s + +• [SLOW TEST:33.054 seconds] +[k8s.io] Container Runtime +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + blackbox test + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:37 + when starting a container that exits + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38 + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:40:59.258: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-vwxfj +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name cm-test-opt-del-4d787145-8241-11e9-bd6e-667e8fbec69d +STEP: Creating configMap with name cm-test-opt-upd-4d7871b9-8241-11e9-bd6e-667e8fbec69d +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-4d787145-8241-11e9-bd6e-667e8fbec69d +STEP: Updating configmap cm-test-opt-upd-4d7871b9-8241-11e9-bd6e-667e8fbec69d +STEP: Creating configMap with name cm-test-opt-create-4d7871f2-8241-11e9-bd6e-667e8fbec69d +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:41:05.788: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-vwxfj" for this suite. 
+May 29 18:41:27.831: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:41:28.034: INFO: namespace: e2e-tests-configmap-vwxfj, resource: bindings, ignored listing per whitelist +May 29 18:41:28.564: INFO: namespace e2e-tests-configmap-vwxfj deletion completed in 22.765406756s + +• [SLOW TEST:29.306 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl api-versions + should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:41:28.564: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-l2gc6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: validating api versions +May 29 18:41:28.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 api-versions' +May 29 18:41:28.984: INFO: stderr: "" +May 29 18:41:28.984: INFO: stdout: "admissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1beta1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nmetrics.k8s.io/v1beta1\nnetworking.k8s.io/v1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:41:28.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-l2gc6" for this suite. 
+May 29 18:41:35.018: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:41:35.648: INFO: namespace: e2e-tests-kubectl-l2gc6, resource: bindings, ignored listing per whitelist +May 29 18:41:35.932: INFO: namespace e2e-tests-kubectl-l2gc6 deletion completed in 6.939796116s + +• [SLOW TEST:7.367 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl api-versions + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if v1 is in available api versions [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:41:35.932: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-rdg4g +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on tmpfs +May 29 18:41:36.289: INFO: Waiting up to 5m0s for pod "pod-63591dcb-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-rdg4g" to be "success or failure" +May 29 18:41:36.325: INFO: Pod "pod-63591dcb-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 35.591938ms +May 29 18:41:38.341: INFO: Pod "pod-63591dcb-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.051510917s +May 29 18:41:40.349: INFO: Pod "pod-63591dcb-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.059324819s +STEP: Saw pod success +May 29 18:41:40.349: INFO: Pod "pod-63591dcb-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:41:40.355: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-63591dcb-8241-11e9-bd6e-667e8fbec69d container test-container: +STEP: delete the pod +May 29 18:41:40.385: INFO: Waiting for pod pod-63591dcb-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:41:40.390: INFO: Pod pod-63591dcb-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:41:40.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-rdg4g" for this suite. +May 29 18:41:46.420: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:41:46.606: INFO: namespace: e2e-tests-emptydir-rdg4g, resource: bindings, ignored listing per whitelist +May 29 18:41:46.663: INFO: namespace e2e-tests-emptydir-rdg4g deletion completed in 6.264466193s + +• [SLOW TEST:10.730 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Pods + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:41:46.663: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-zq9sk +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +May 29 18:41:51.538: INFO: Successfully updated pod "pod-update-69bad12d-8241-11e9-bd6e-667e8fbec69d" +STEP: verifying the updated pod is in kubernetes +May 29 18:41:51.554: INFO: Pod update OK +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:41:51.554: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-zq9sk" for this suite. 
+May 29 18:42:13.585: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:42:13.756: INFO: namespace: e2e-tests-pods-zq9sk, resource: bindings, ignored listing per whitelist +May 29 18:42:13.871: INFO: namespace e2e-tests-pods-zq9sk deletion completed in 22.308397785s + +• [SLOW TEST:27.208 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:42:13.871: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-mzkhr +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:42:14.438: INFO: Waiting up to 5m0s for pod "downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-mzkhr" to be "success or failure" +May 29 18:42:14.445: INFO: Pod "downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.555876ms +May 29 18:42:16.451: INFO: Pod "downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013273238s +May 29 18:42:18.462: INFO: Pod "downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.024005834s +STEP: Saw pod success +May 29 18:42:18.462: INFO: Pod "downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:42:18.474: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:42:18.519: INFO: Waiting for pod downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:42:18.530: INFO: Pod downwardapi-volume-7a1637b0-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:42:18.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-mzkhr" for this suite. +May 29 18:42:24.578: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:42:24.832: INFO: namespace: e2e-tests-projected-mzkhr, resource: bindings, ignored listing per whitelist +May 29 18:42:24.884: INFO: namespace e2e-tests-projected-mzkhr deletion completed in 6.342965862s + +• [SLOW TEST:11.013 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-node] Downward API + should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:42:24.885: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-nc6wc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +May 29 18:42:25.171: INFO: Waiting up to 5m0s for pod "downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-nc6wc" to be "success or failure" +May 29 18:42:25.177: INFO: Pod "downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.286982ms +May 29 18:42:27.185: INFO: Pod "downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014139857s +May 29 18:42:29.192: INFO: Pod "downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021492332s +STEP: Saw pod success +May 29 18:42:29.192: INFO: Pod "downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:42:29.199: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d container dapi-container: +STEP: delete the pod +May 29 18:42:29.226: INFO: Waiting for pod downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:42:29.232: INFO: Pod downward-api-807bcfce-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:42:29.232: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-nc6wc" for this suite. +May 29 18:42:35.261: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:42:35.409: INFO: namespace: e2e-tests-downward-api-nc6wc, resource: bindings, ignored listing per whitelist +May 29 18:42:35.541: INFO: namespace e2e-tests-downward-api-nc6wc deletion completed in 6.302063789s + +• [SLOW TEST:10.656 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide pod UID as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0777,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:42:35.542: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-555td +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0777,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0777 on node default medium +May 29 18:42:35.871: INFO: Waiting up to 5m0s for pod "pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-555td" to be "success or failure" +May 29 18:42:35.877: INFO: Pod "pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.900666ms +May 29 18:42:37.884: INFO: Pod "pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013501147s +May 29 18:42:39.892: INFO: Pod "pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.020820811s +STEP: Saw pod success +May 29 18:42:39.892: INFO: Pod "pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:42:39.898: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d container test-container: +STEP: delete the pod +May 29 18:42:39.924: INFO: Waiting for pod pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:42:39.930: INFO: Pod pod-86dc8e01-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:42:39.930: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-555td" for this suite. +May 29 18:42:45.958: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:42:46.024: INFO: namespace: e2e-tests-emptydir-555td, resource: bindings, ignored listing per whitelist +May 29 18:42:46.236: INFO: namespace e2e-tests-emptydir-555td deletion completed in 6.298704948s + +• [SLOW TEST:10.694 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0777,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Downward API volume + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:42:46.236: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-l2xf6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:42:46.509: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-l2xf6" to be "success or failure" +May 29 18:42:46.516: INFO: Pod "downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.904292ms +May 29 18:42:48.525: INFO: Pod "downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.014967903s +May 29 18:42:50.532: INFO: Pod "downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022741009s +STEP: Saw pod success +May 29 18:42:50.532: INFO: Pod "downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:42:50.539: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:42:50.567: INFO: Waiting for pod downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:42:50.574: INFO: Pod downwardapi-volume-8d33c6d5-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:42:50.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-l2xf6" for this suite. +May 29 18:42:56.609: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:42:56.808: INFO: namespace: e2e-tests-downward-api-l2xf6, resource: bindings, ignored listing per whitelist +May 29 18:42:56.833: INFO: namespace e2e-tests-downward-api-l2xf6 deletion completed in 6.250745717s + +• [SLOW TEST:10.597 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl logs + should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:42:56.833: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-ng6t6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl logs + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1134 +STEP: creating an rc +May 29 18:42:57.110: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-ng6t6' +May 29 18:42:57.852: INFO: stderr: "" +May 29 18:42:57.852: INFO: stdout: "replicationcontroller/redis-master created\n" +[It] should be able to retrieve and filter logs [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Waiting for Redis master to start. +May 29 18:42:58.860: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:42:58.860: INFO: Found 0 / 1 +May 29 18:42:59.860: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:42:59.860: INFO: Found 0 / 1 +May 29 18:43:00.858: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:43:00.858: INFO: Found 1 / 1 +May 29 18:43:00.858: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +May 29 18:43:00.863: INFO: Selector matched 1 pods for map[app:redis] +May 29 18:43:00.864: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +STEP: checking for a matching strings +May 29 18:43:00.864: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 logs redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6' +May 29 18:43:01.030: INFO: stderr: "" +May 29 18:43:01.030: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 29 May 18:42:59.177 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 29 May 18:42:59.177 # Server started, Redis version 3.2.12\n1:M 29 May 18:42:59.177 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. 
Redis must be restarted after THP is disabled.\n1:M 29 May 18:42:59.177 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log lines +May 29 18:43:01.030: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 log redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6 --tail=1' +May 29 18:43:01.171: INFO: stderr: "" +May 29 18:43:01.171: INFO: stdout: "1:M 29 May 18:42:59.177 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log bytes +May 29 18:43:01.171: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 log redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6 --limit-bytes=1' +May 29 18:43:01.316: INFO: stderr: "" +May 29 18:43:01.316: INFO: stdout: " " +STEP: exposing timestamps +May 29 18:43:01.316: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 log redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6 --tail=1 --timestamps' +May 29 18:43:01.446: INFO: stderr: "" +May 29 18:43:01.446: INFO: stdout: "2019-05-29T18:42:59.177460524Z 1:M 29 May 18:42:59.177 * The server is now ready to accept connections on port 6379\n" +STEP: restricting to a time range +May 29 18:43:03.946: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 log redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6 --since=1s' +May 29 18:43:04.080: INFO: stderr: "" +May 29 18:43:04.080: INFO: stdout: "" +May 29 18:43:04.080: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 log redis-master-6vn7p redis-master --namespace=e2e-tests-kubectl-ng6t6 --since=24h' +May 29 18:43:04.218: INFO: stderr: "" +May 29 18:43:04.218: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 29 May 18:42:59.177 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 29 May 18:42:59.177 # Server started, Redis version 3.2.12\n1:M 29 May 18:42:59.177 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 29 May 18:42:59.177 * The server is now ready to accept connections on port 6379\n" +[AfterEach] [k8s.io] Kubectl logs + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1140 +STEP: using delete to clean up resources +May 29 18:43:04.219: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-ng6t6' +May 29 18:43:04.335: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely.\n" +May 29 18:43:04.335: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n" +May 29 18:43:04.335: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get rc,svc -l name=nginx --no-headers --namespace=e2e-tests-kubectl-ng6t6' +May 29 18:43:04.444: INFO: stderr: "No resources found.\n" +May 29 18:43:04.444: INFO: stdout: "" +May 29 18:43:04.444: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -l name=nginx --namespace=e2e-tests-kubectl-ng6t6 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +May 29 18:43:04.562: INFO: stderr: "" +May 29 18:43:04.562: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:43:04.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-ng6t6" for this suite. +May 29 18:43:10.593: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:43:10.775: INFO: namespace: e2e-tests-kubectl-ng6t6, resource: bindings, ignored listing per whitelist +May 29 18:43:10.890: INFO: namespace e2e-tests-kubectl-ng6t6 deletion completed in 6.317401734s + +• [SLOW TEST:14.057 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl logs + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:43:10.890: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename watch +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-5ggkt +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +May 29 18:43:11.217: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-5ggkt,SelfLink:/api/v1/namespaces/e2e-tests-watch-5ggkt/configmaps/e2e-watch-test-resource-version,UID:9be6230a-8241-11e9-9b18-c2b4512ea1b9,ResourceVersion:948810747,Generation:0,CreationTimestamp:2019-05-29 18:43:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +May 29 18:43:11.217: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-5ggkt,SelfLink:/api/v1/namespaces/e2e-tests-watch-5ggkt/configmaps/e2e-watch-test-resource-version,UID:9be6230a-8241-11e9-9b18-c2b4512ea1b9,ResourceVersion:948810749,Generation:0,CreationTimestamp:2019-05-29 18:43:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:43:11.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-5ggkt" for this suite. +May 29 18:43:17.256: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:43:17.466: INFO: namespace: e2e-tests-watch-5ggkt, resource: bindings, ignored listing per whitelist +May 29 18:43:17.533: INFO: namespace e2e-tests-watch-5ggkt deletion completed in 6.30714745s + +• [SLOW TEST:6.642 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:43:17.533: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-x8bsp +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating the pod +May 29 18:43:22.459: INFO: Successfully updated pod "annotationupdate9fdd7c25-8241-11e9-bd6e-667e8fbec69d" +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:43:24.490: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-x8bsp" for this suite. +May 29 18:43:46.518: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:43:46.707: INFO: namespace: e2e-tests-projected-x8bsp, resource: bindings, ignored listing per whitelist +May 29 18:43:46.786: INFO: namespace e2e-tests-projected-x8bsp deletion completed in 22.288336596s + +• [SLOW TEST:29.253 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:43:46.787: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-5gk2x +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:43:47.057: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-5gk2x" to be "success or failure" +May 29 18:43:47.063: INFO: Pod "downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.597386ms +May 29 18:43:49.070: INFO: Pod "downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.01291178s +May 29 18:43:51.085: INFO: Pod "downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027317165s +STEP: Saw pod success +May 29 18:43:51.085: INFO: Pod "downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:43:51.091: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:43:51.119: INFO: Waiting for pod downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:43:51.125: INFO: Pod downwardapi-volume-b14ac09a-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:43:51.125: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-5gk2x" for this suite. +May 29 18:43:57.154: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:43:57.404: INFO: namespace: e2e-tests-downward-api-5gk2x, resource: bindings, ignored listing per whitelist +May 29 18:43:57.463: INFO: namespace e2e-tests-downward-api-5gk2x deletion completed in 6.330385878s + +• [SLOW TEST:10.676 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Pods + should contain environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:43:57.463: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-677k5 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should contain environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:43:59.830: INFO: Waiting up to 5m0s for pod "client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-pods-677k5" to be "success or failure" +May 29 18:43:59.837: INFO: Pod "client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.30241ms +May 29 18:44:01.852: INFO: Pod "client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021888134s +May 29 18:44:03.863: INFO: Pod "client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033554184s +STEP: Saw pod success +May 29 18:44:03.863: INFO: Pod "client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:44:03.869: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d container env3cont: +STEP: delete the pod +May 29 18:44:03.904: INFO: Waiting for pod client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:44:03.910: INFO: Pod client-envvars-b8e85dcd-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:44:03.910: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-677k5" for this suite. +May 29 18:44:45.940: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:44:46.147: INFO: namespace: e2e-tests-pods-677k5, resource: bindings, ignored listing per whitelist +May 29 18:44:46.174: INFO: namespace e2e-tests-pods-677k5 deletion completed in 42.255957586s + +• [SLOW TEST:48.711 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should contain environment variables for services [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[k8s.io] Pods + should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:44:46.175: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pods +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-lpd6v +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating pod +May 29 18:44:48.524: INFO: Pod pod-hostip-d4b8604c-8241-11e9-bd6e-667e8fbec69d has hostIP: 10.12.157.201 +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:44:48.524: INFO: Waiting up to 3m0s for all (but 0) nodes 
to be ready +STEP: Destroying namespace "e2e-tests-pods-lpd6v" for this suite. +May 29 18:45:10.555: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:45:10.585: INFO: namespace: e2e-tests-pods-lpd6v, resource: bindings, ignored listing per whitelist +May 29 18:45:10.833: INFO: namespace e2e-tests-pods-lpd6v deletion completed in 22.300784481s + +• [SLOW TEST:24.657 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-api-machinery] Garbage collector + should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:45:10.834: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-vwcnx +STEP: Waiting for a default service account to be provisioned in namespace +[It] should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc +STEP: delete the rc +STEP: wait for the rc to be deleted +STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods +STEP: Gathering metrics +W0529 18:45:51.178804 19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. 
+May 29 18:45:51.178: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:45:51.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-vwcnx" for this suite. +May 29 18:45:57.207: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:45:57.243: INFO: namespace: e2e-tests-gc-vwcnx, resource: bindings, ignored listing per whitelist +May 29 18:45:57.435: INFO: namespace e2e-tests-gc-vwcnx deletion completed in 6.249844864s + +• [SLOW TEST:46.601 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should orphan pods created by rc if delete options say so [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:45:57.435: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-x2pv8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name 
projected-secret-test-ff2a66c9-8241-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:45:57.714: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-x2pv8" to be "success or failure" +May 29 18:45:57.721: INFO: Pod "pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.220738ms +May 29 18:45:59.728: INFO: Pod "pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013461734s +May 29 18:46:01.748: INFO: Pod "pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033863735s +STEP: Saw pod success +May 29 18:46:01.748: INFO: Pod "pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:46:01.755: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d container projected-secret-volume-test: +STEP: delete the pod +May 29 18:46:01.786: INFO: Waiting for pod pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d to disappear +May 29 18:46:01.792: INFO: Pod pod-projected-secrets-ff2b71de-8241-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:46:01.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-x2pv8" for this suite. +May 29 18:46:07.821: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:46:08.373: INFO: namespace: e2e-tests-projected-x2pv8, resource: bindings, ignored listing per whitelist +May 29 18:46:08.519: INFO: namespace e2e-tests-projected-x2pv8 deletion completed in 6.720384635s + +• [SLOW TEST:11.084 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:46:08.520: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-gjpg9 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: validating cluster-info +May 29 18:46:08.773: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 cluster-info' +May 29 18:46:08.885: INFO: stderr: "" +May 29 18:46:08.885: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://10.32.0.1:443\x1b[0m\n\x1b[0;32mCoreDNS\x1b[0m is running at \x1b[0;33mhttps://10.32.0.1:443/api/v1/namespaces/kube-system/services/coredns:dns/proxy\x1b[0m\n\x1b[0;32mkubernetes-dashboard\x1b[0m is running at \x1b[0;33mhttps://10.32.0.1:443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:46:08.885: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-gjpg9" for this suite. +May 29 18:46:14.922: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:46:15.010: INFO: namespace: e2e-tests-kubectl-gjpg9, resource: bindings, ignored listing per whitelist +May 29 18:46:15.177: INFO: namespace e2e-tests-kubectl-gjpg9 deletion completed in 6.283755332s + +• [SLOW TEST:6.657 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl cluster-info + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Projected combined + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected combined + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:46:15.177: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-xzsqd +STEP: Waiting for a default service account to be provisioned in namespace +[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-projected-all-test-volume-09c134a8-8242-11e9-bd6e-667e8fbec69d +STEP: Creating secret with name secret-projected-all-test-volume-09c13480-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test Check all projections for projected volume plugin +May 29 18:46:15.490: INFO: Waiting up to 5m0s for pod "projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-xzsqd" to be "success or failure" +May 29 18:46:15.496: INFO: Pod "projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.973092ms +May 29 18:46:17.504: INFO: Pod "projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013831388s +May 29 18:46:19.511: INFO: Pod "projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021092632s +STEP: Saw pod success +May 29 18:46:19.511: INFO: Pod "projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:46:19.519: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d container projected-all-volume-test: +STEP: delete the pod +May 29 18:46:19.552: INFO: Waiting for pod projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:46:19.557: INFO: Pod projected-volume-09c13420-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected combined + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:46:19.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-xzsqd" for this suite. 
+May 29 18:46:25.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:46:25.808: INFO: namespace: e2e-tests-projected-xzsqd, resource: bindings, ignored listing per whitelist +May 29 18:46:25.821: INFO: namespace e2e-tests-projected-xzsqd deletion completed in 6.256292185s + +• [SLOW TEST:10.644 seconds] +[sig-storage] Projected combined +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31 + should project all components that make up the projection API [Projection][NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:46:25.821: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename configmap +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-dtdkq +STEP: Waiting for a default service account to be provisioned in namespace +[It] binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-upd-10163d16-8242-11e9-bd6e-667e8fbec69d +STEP: Creating the pod +STEP: Waiting for pod with text data +STEP: Waiting for pod with binary data +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:46:30.159: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-dtdkq" for this suite. 
+May 29 18:46:50.190: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:46:50.266: INFO: namespace: e2e-tests-configmap-dtdkq, resource: bindings, ignored listing per whitelist +May 29 18:46:50.406: INFO: namespace e2e-tests-configmap-dtdkq deletion completed in 20.239633754s + +• [SLOW TEST:24.585 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + binary data should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:46:50.407: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename replicaset +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replicaset-8ww68 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:46:50.665: INFO: Creating ReplicaSet my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d +May 29 18:46:50.679: INFO: Pod name my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d: Found 0 pods out of 1 +May 29 18:46:55.688: INFO: Pod name my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d: Found 1 pods out of 1 +May 29 18:46:55.688: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d" is running +May 29 18:46:55.696: INFO: Pod "my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d-f8k2n" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:46:50 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:46:53 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:46:53 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:46:50 +0000 UTC Reason: Message:}]) +May 29 18:46:55.696: INFO: Trying to dial the pod +May 29 18:47:00.862: INFO: Controller my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d: Got expected result from replica 1 [my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d-f8k2n]: "my-hostname-basic-1ebcdb84-8242-11e9-bd6e-667e8fbec69d-f8k2n", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicaSet + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:47:00.862: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replicaset-8ww68" for this suite. +May 29 18:47:06.895: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:47:07.041: INFO: namespace: e2e-tests-replicaset-8ww68, resource: bindings, ignored listing per whitelist +May 29 18:47:07.123: INFO: namespace e2e-tests-replicaset-8ww68 deletion completed in 6.252449937s + +• [SLOW TEST:16.717 seconds] +[sig-apps] ReplicaSet +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:47:07.124: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pod-network-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-vcjwx +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-vcjwx +STEP: creating a selector +STEP: Creating the service pods in kubernetes +May 29 18:47:07.398: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +May 29 18:47:41.546: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.64.0.41:8080/dial?request=hostName&protocol=http&host=100.64.0.40&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-vcjwx PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 29 18:47:41.546: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +May 29 18:47:41.750: INFO: Waiting for endpoints: map[] +May 29 18:47:41.758: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.64.0.41:8080/dial?request=hostName&protocol=http&host=100.64.1.71&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-vcjwx PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 29 18:47:41.758: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +May 29 18:47:41.931: INFO: Waiting for endpoints: map[] +[AfterEach] [sig-network] Networking + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:47:41.931: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-vcjwx" for this suite. +May 29 18:48:03.963: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:03.990: INFO: namespace: e2e-tests-pod-network-test-vcjwx, resource: bindings, ignored listing per whitelist +May 29 18:48:04.208: INFO: namespace e2e-tests-pod-network-test-vcjwx deletion completed in 22.267293751s + +• [SLOW TEST:57.085 seconds] +[sig-network] Networking +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:04.209: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-95rt8 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:48:04.562: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-95rt8" to be "success or failure" +May 29 18:48:04.568: INFO: Pod "downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.269495ms +May 29 18:48:06.590: INFO: Pod "downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.028642322s +May 29 18:48:08.617: INFO: Pod "downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.054988626s +STEP: Saw pod success +May 29 18:48:08.617: INFO: Pod "downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:48:08.625: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:48:08.679: INFO: Waiting for pod downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:48:08.684: INFO: Pod downwardapi-volume-4ac6a6d7-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:08.684: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-95rt8" for this suite. +May 29 18:48:14.714: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:14.787: INFO: namespace: e2e-tests-downward-api-95rt8, resource: bindings, ignored listing per whitelist +May 29 18:48:15.015: INFO: namespace e2e-tests-downward-api-95rt8 deletion completed in 6.324077976s + +• [SLOW TEST:10.807 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Downward API volume + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:15.016: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-bps95 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:48:15.349: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-bps95" to be "success or failure" +May 29 18:48:15.356: INFO: Pod "downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 7.315436ms +May 29 18:48:17.370: INFO: Pod "downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021208855s +May 29 18:48:19.378: INFO: Pod "downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029186354s +STEP: Saw pod success +May 29 18:48:19.378: INFO: Pod "downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:48:19.384: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:48:19.411: INFO: Waiting for pod downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:48:19.417: INFO: Pod downwardapi-volume-5134bd9c-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:19.417: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-bps95" for this suite. +May 29 18:48:25.446: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:25.661: INFO: namespace: e2e-tests-downward-api-bps95, resource: bindings, ignored listing per whitelist +May 29 18:48:25.688: INFO: namespace e2e-tests-downward-api-bps95 deletion completed in 6.264501793s + +• [SLOW TEST:10.672 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should set DefaultMode on files [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:25.690: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-v7rln +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-map-578ebd57-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:48:26.013: INFO: Waiting up to 5m0s for pod "pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-v7rln" to be "success or 
failure" +May 29 18:48:26.027: INFO: Pod "pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 14.495275ms +May 29 18:48:28.044: INFO: Pod "pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.031242118s +May 29 18:48:30.051: INFO: Pod "pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.038112139s +STEP: Saw pod success +May 29 18:48:30.051: INFO: Pod "pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:48:30.057: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d container secret-volume-test: +STEP: delete the pod +May 29 18:48:30.085: INFO: Waiting for pod pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:48:30.090: INFO: Pod pod-secrets-578fde76-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:30.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-v7rln" for this suite. +May 29 18:48:36.124: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:36.378: INFO: namespace: e2e-tests-secrets-v7rln, resource: bindings, ignored listing per whitelist +May 29 18:48:36.444: INFO: namespace e2e-tests-secrets-v7rln deletion completed in 6.346517485s + +• [SLOW TEST:10.754 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-apps] Daemon set [Serial] + should rollback without unnecessary restarts [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:36.444: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename daemonsets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-ztqrk +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should rollback without unnecessary restarts [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:48:36.739: INFO: Requires at least 2 nodes (not -1) +[AfterEach] [sig-apps] Daemon set [Serial] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +May 29 18:48:36.752: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-ztqrk/daemonsets","resourceVersion":"948835942"},"items":null} + +May 29 18:48:36.757: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-ztqrk/pods","resourceVersion":"948835942"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:36.776: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-ztqrk" for this suite. +May 29 18:48:42.815: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:43.063: INFO: namespace: e2e-tests-daemonsets-ztqrk, resource: bindings, ignored listing per whitelist +May 29 18:48:43.074: INFO: namespace e2e-tests-daemonsets-ztqrk deletion completed in 6.290708664s + +S [SKIPPING] [6.630 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should rollback without unnecessary restarts [Conformance] [It] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 + + May 29 18:48:36.739: Requires at least 2 nodes (not -1) + + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/util.go:292 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:43.075: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename emptydir +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-q7s2b +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on tmpfs +May 29 18:48:43.357: INFO: Waiting up to 5m0s for pod "pod-61e68840-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-q7s2b" to be "success or failure" +May 29 18:48:43.363: INFO: Pod "pod-61e68840-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.529479ms +May 29 18:48:45.372: INFO: Pod "pod-61e68840-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.014713977s +May 29 18:48:47.381: INFO: Pod "pod-61e68840-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024002264s +STEP: Saw pod success +May 29 18:48:47.381: INFO: Pod "pod-61e68840-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:48:47.387: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-61e68840-8242-11e9-bd6e-667e8fbec69d container test-container: +STEP: delete the pod +May 29 18:48:47.414: INFO: Waiting for pod pod-61e68840-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:48:47.419: INFO: Pod pod-61e68840-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:47.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-q7s2b" for this suite. +May 29 18:48:53.447: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:48:53.664: INFO: namespace: e2e-tests-emptydir-q7s2b, resource: bindings, ignored listing per whitelist +May 29 18:48:53.729: INFO: namespace e2e-tests-emptydir-q7s2b deletion completed in 6.302825724s + +• [SLOW TEST:10.654 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:48:53.730: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename kubectl +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-qcjm6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: executing a command with run --rm and attach with stdin +May 29 18:48:54.040: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 --namespace=e2e-tests-kubectl-qcjm6 run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed'' +May 29 18:48:55.902: INFO: stderr: "kubectl run 
--generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n" +May 29 18:48:55.902: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n" +STEP: verifying the job e2e-test-rm-busybox-job was deleted +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:48:57.915: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-qcjm6" for this suite. +May 29 18:49:05.948: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:49:06.097: INFO: namespace: e2e-tests-kubectl-qcjm6, resource: bindings, ignored listing per whitelist +May 29 18:49:06.232: INFO: namespace e2e-tests-kubectl-qcjm6 deletion completed in 8.307362136s + +• [SLOW TEST:12.502 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run --rm job + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create a job from an image, then delete the job [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:49:06.232: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename downward-api +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-sn2dq +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:49:06.584: INFO: Waiting up to 5m0s for pod "downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-sn2dq" to be "success or failure" +May 29 18:49:06.590: INFO: Pod "downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.370127ms +May 29 18:49:08.597: INFO: Pod "downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.013170638s +May 29 18:49:10.625: INFO: Pod "downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.040727745s +STEP: Saw pod success +May 29 18:49:10.625: INFO: Pod "downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:49:10.633: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:49:10.672: INFO: Waiting for pod downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:49:10.682: INFO: Pod downwardapi-volume-6fbddb49-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:49:10.682: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-sn2dq" for this suite. +May 29 18:49:16.728: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:49:17.023: INFO: namespace: e2e-tests-downward-api-sn2dq, resource: bindings, ignored listing per whitelist +May 29 18:49:17.023: INFO: namespace e2e-tests-downward-api-sn2dq deletion completed in 6.333021868s + +• [SLOW TEST:10.791 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:49:17.024: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename dns +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-dns-5bzln +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > 
/results/wheezy_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-5bzln.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-5bzln.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-5bzln.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-5bzln.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-5bzln.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-5bzln.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +May 29 18:49:32.695: INFO: DNS probes using e2e-tests-dns-5bzln/dns-test-76206c14-8242-11e9-bd6e-667e8fbec69d succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:49:32.712: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-dns-5bzln" for this suite. 
+May 29 18:49:38.754: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:49:39.017: INFO: namespace: e2e-tests-dns-5bzln, resource: bindings, ignored listing per whitelist +May 29 18:49:39.065: INFO: namespace e2e-tests-dns-5bzln deletion completed in 6.332483778s + +• [SLOW TEST:22.041 seconds] +[sig-network] DNS +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-api-machinery] Secrets + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:49:39.065: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename secrets +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-hfgw8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-834bb28e-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume secrets +May 29 18:49:39.391: INFO: Waiting up to 5m0s for pod "pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-hfgw8" to be "success or failure" +May 29 18:49:39.397: INFO: Pod "pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.94548ms +May 29 18:49:41.412: INFO: Pod "pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020532431s +May 29 18:49:43.420: INFO: Pod "pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028385634s +STEP: Saw pod success +May 29 18:49:43.420: INFO: Pod "pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:49:43.426: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d container secret-env-test: +STEP: delete the pod +May 29 18:49:43.459: INFO: Waiting for pod pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:49:43.465: INFO: Pod pod-secrets-834cd2f4-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:49:43.465: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-hfgw8" for this suite. 
+May 29 18:49:49.493: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:49:49.749: INFO: namespace: e2e-tests-secrets-hfgw8, resource: bindings, ignored listing per whitelist +May 29 18:49:49.786: INFO: namespace e2e-tests-secrets-hfgw8 deletion completed in 6.313964641s + +• [SLOW TEST:10.722 seconds] +[sig-api-machinery] Secrets +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32 + should be consumable from pods in env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:49:49.787: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-s4w5h +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-89aa933f-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:49:50.082: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-s4w5h" to be "success or failure" +May 29 18:49:50.093: INFO: Pod "pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 11.099247ms +May 29 18:49:52.109: INFO: Pod "pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02757247s +May 29 18:49:54.117: INFO: Pod "pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.035517156s +STEP: Saw pod success +May 29 18:49:54.117: INFO: Pod "pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:49:54.124: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: +STEP: delete the pod +May 29 18:49:54.153: INFO: Waiting for pod pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:49:54.158: INFO: Pod pod-projected-configmaps-89ab94a0-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:49:54.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-s4w5h" for this suite. +May 29 18:50:00.195: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:50:00.385: INFO: namespace: e2e-tests-projected-s4w5h, resource: bindings, ignored listing per whitelist +May 29 18:50:00.523: INFO: namespace e2e-tests-projected-s4w5h deletion completed in 6.351457463s + +• [SLOW TEST:10.736 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:50:00.523: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-7knvq +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-90151290-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:50:00.844: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-7knvq" to be "success or failure" +May 29 18:50:00.850: INFO: Pod "pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.6485ms +May 29 18:50:02.865: INFO: Pod "pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.020979274s +May 29 18:50:04.872: INFO: Pod "pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028191023s +STEP: Saw pod success +May 29 18:50:04.872: INFO: Pod "pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:50:04.880: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: +STEP: delete the pod +May 29 18:50:04.906: INFO: Waiting for pod pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:50:04.914: INFO: Pod pod-projected-configmaps-901619cd-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:50:04.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-7knvq" for this suite. +May 29 18:50:10.945: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:50:11.090: INFO: namespace: e2e-tests-projected-7knvq, resource: bindings, ignored listing per whitelist +May 29 18:50:11.211: INFO: namespace e2e-tests-projected-7knvq deletion completed in 6.288461047s + +• [SLOW TEST:10.689 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:50:11.212: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename pod-network-test +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-g7sp7 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for node-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-g7sp7 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +May 29 18:50:11.468: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +May 29 18:50:33.584: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 100.64.0.43 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-g7sp7 
PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 29 18:50:33.584: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +May 29 18:50:34.734: INFO: Found all expected endpoints: [netserver-0] +May 29 18:50:34.741: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 100.64.1.81 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-g7sp7 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 29 18:50:34.741: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +May 29 18:50:35.904: INFO: Found all expected endpoints: [netserver-1] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:50:35.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-g7sp7" for this suite. +May 29 18:50:57.954: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:50:58.164: INFO: namespace: e2e-tests-pod-network-test-g7sp7, resource: bindings, ignored listing per whitelist +May 29 18:50:58.232: INFO: namespace e2e-tests-pod-network-test-g7sp7 deletion completed in 22.309301785s + +• [SLOW TEST:47.020 seconds] +[sig-network] Networking +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for node-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:50:58.232: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename replication-controller +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-vjvtj +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating replication controller my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d +May 29 18:50:58.508: INFO: Pod name my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d: Found 0 pods out of 1 +May 29 18:51:03.519: INFO: Pod name my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d: Found 1 pods out of 1 +May 29 18:51:03.519: INFO: Ensuring all pods for 
ReplicationController "my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d" are running +May 29 18:51:03.526: INFO: Pod "my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d-84lkq" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:50:58 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:51:00 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:51:00 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-29 18:50:58 +0000 UTC Reason: Message:}]) +May 29 18:51:03.526: INFO: Trying to dial the pod +May 29 18:51:08.644: INFO: Controller my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d: Got expected result from replica 1 [my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d-84lkq]: "my-hostname-basic-b274de99-8242-11e9-bd6e-667e8fbec69d-84lkq", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:51:08.644: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replication-controller-vjvtj" for this suite. +May 29 18:51:14.673: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:51:14.879: INFO: namespace: e2e-tests-replication-controller-vjvtj, resource: bindings, ignored listing per whitelist +May 29 18:51:14.961: INFO: namespace e2e-tests-replication-controller-vjvtj deletion completed in 6.309600409s + +• [SLOW TEST:16.729 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-apps] Deployment + RecreateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:51:14.961: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename deployment +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-hcwgf +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] RecreateDeployment should delete old pods and create new ones [Conformance] + 
/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:51:15.275: INFO: Creating deployment "test-recreate-deployment" +May 29 18:51:15.282: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1 +May 29 18:51:15.299: INFO: deployment "test-recreate-deployment" doesn't have the required revision set +May 29 18:51:17.313: INFO: Waiting deployment "test-recreate-deployment" to complete +May 29 18:51:17.319: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694752675, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694752675, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694752675, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694752675, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-5dfdcc846d\" is progressing."}}, CollisionCount:(*int32)(nil)} +May 29 18:51:19.336: INFO: Triggering a new rollout for deployment "test-recreate-deployment" +May 29 18:51:19.350: INFO: Updating deployment test-recreate-deployment +May 29 18:51:19.350: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with olds pods +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +May 29 18:51:19.434: INFO: Deployment "test-recreate-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:e2e-tests-deployment-hcwgf,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hcwgf/deployments/test-recreate-deployment,UID:bc75a003-8242-11e9-9b18-c2b4512ea1b9,ResourceVersion:948848631,Generation:2,CreationTimestamp:2019-05-29 18:51:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-05-29 18:51:19 +0000 UTC 2019-05-29 18:51:19 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-05-29 18:51:19 +0000 UTC 2019-05-29 18:51:15 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-697fbf54bf" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},} + +May 29 18:51:19.442: INFO: New ReplicaSet "test-recreate-deployment-697fbf54bf" of Deployment "test-recreate-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf,GenerateName:,Namespace:e2e-tests-deployment-hcwgf,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hcwgf/replicasets/test-recreate-deployment-697fbf54bf,UID:bee7a37e-8242-11e9-9b18-c2b4512ea1b9,ResourceVersion:948848628,Generation:1,CreationTimestamp:2019-05-29 18:51:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment bc75a003-8242-11e9-9b18-c2b4512ea1b9 0xc00222ef17 0xc00222ef18}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +May 29 18:51:19.442: INFO: All old ReplicaSets of Deployment "test-recreate-deployment": +May 29 18:51:19.442: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-5dfdcc846d,GenerateName:,Namespace:e2e-tests-deployment-hcwgf,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hcwgf/replicasets/test-recreate-deployment-5dfdcc846d,UID:bc767ae8-8242-11e9-9b18-c2b4512ea1b9,ResourceVersion:948848618,Generation:2,CreationTimestamp:2019-05-29 18:51:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment bc75a003-8242-11e9-9b18-c2b4512ea1b9 0xc00222edd7 0xc00222edd8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +May 29 18:51:19.449: INFO: Pod 
"test-recreate-deployment-697fbf54bf-rwq44" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf-rwq44,GenerateName:test-recreate-deployment-697fbf54bf-,Namespace:e2e-tests-deployment-hcwgf,SelfLink:/api/v1/namespaces/e2e-tests-deployment-hcwgf/pods/test-recreate-deployment-697fbf54bf-rwq44,UID:bee859e1-8242-11e9-9b18-c2b4512ea1b9,ResourceVersion:948848623,Generation:0,CreationTimestamp:2019-05-29 18:51:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-697fbf54bf bee7a37e-8242-11e9-9b18-c2b4512ea1b9 0xc00222fa67 0xc00222fa68}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-jmwd8 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-jmwd8,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-jmwd8 true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00222fad0} {node.kubernetes.io/unreachable Exists NoExecute 0xc00222faf0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 18:51:19 +0000 UTC }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:51:19.449: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-hcwgf" for this suite. 
+May 29 18:51:25.481: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:51:25.557: INFO: namespace: e2e-tests-deployment-hcwgf, resource: bindings, ignored listing per whitelist +May 29 18:51:25.757: INFO: namespace e2e-tests-deployment-hcwgf deletion completed in 6.299747219s + +• [SLOW TEST:10.795 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + RecreateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:51:25.757: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename sched-pred +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-6bjw6 +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79 +May 29 18:51:26.022: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +May 29 18:51:26.035: INFO: Waiting for terminating namespaces to be deleted... 
+May 29 18:51:26.041: INFO: +Logging pods the kubelet thinks is on node scw-sono13-default-2865dd8133304358ae8da697bb2 before test +May 29 18:51:26.060: INFO: heapster-d8d4579b6-fzrlt from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container heapster ready: true, restart count 0 +May 29 18:51:26.060: INFO: flannel-nnv2c from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container kube-flannel ready: true, restart count 0 +May 29 18:51:26.060: INFO: metrics-server-794596bd9d-x9dz9 from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container metrics-server ready: true, restart count 0 +May 29 18:51:26.060: INFO: node-problem-detector-6bkln from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container node-problem-detector ready: true, restart count 0 +May 29 18:51:26.060: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-krxc2 from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded) +May 29 18:51:26.060: INFO: Container sonobuoy-worker ready: true, restart count 0 +May 29 18:51:26.060: INFO: Container systemd-logs ready: true, restart count 0 +May 29 18:51:26.060: INFO: monitoring-influxdb-7c84bfcfc8-snwmn from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container influxdb ready: true, restart count 0 +May 29 18:51:26.060: INFO: kubernetes-dashboard-794fb6974c-d7btd from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container kubernetes-dashboard ready: true, restart count 0 +May 29 18:51:26.060: INFO: coredns-59b5b6c955-ssfxs from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container coredns ready: true, restart count 0 +May 29 18:51:26.060: INFO: kube-proxy-s4qs6 from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.060: INFO: Container kube-proxy ready: true, restart count 0 +May 29 18:51:26.060: INFO: +Logging pods the kubelet thinks is on node scw-sono13-default-71171af685174eada6c25c1541e before test +May 29 18:51:26.078: INFO: node-problem-detector-lbd8v from kube-system started at 2019-05-29 18:14:33 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.078: INFO: Container node-problem-detector ready: true, restart count 0 +May 29 18:51:26.079: INFO: kube-proxy-7jxzv from kube-system started at 2019-05-29 18:14:13 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.079: INFO: Container kube-proxy ready: true, restart count 0 +May 29 18:51:26.079: INFO: flannel-8bs82 from kube-system started at 2019-05-29 18:14:14 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.079: INFO: Container kube-flannel ready: true, restart count 0 +May 29 18:51:26.079: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-29 18:15:07 +0000 UTC (1 container statuses recorded) +May 29 18:51:26.079: INFO: Container kube-sonobuoy ready: true, restart count 0 +May 29 18:51:26.079: INFO: sonobuoy-e2e-job-721690eaa8df4a4c from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded) +May 29 18:51:26.079: INFO: Container e2e ready: true, restart count 0 +May 29 18:51:26.079: INFO: Container 
sonobuoy-worker ready: true, restart count 0 +May 29 18:51:26.079: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-ct67t from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded) +May 29 18:51:26.079: INFO: Container sonobuoy-worker ready: true, restart count 0 +May 29 18:51:26.079: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-c5505af4-8242-11e9-bd6e-667e8fbec69d 42 +STEP: Trying to relaunch the pod, now with labels. +STEP: removing the label kubernetes.io/e2e-c5505af4-8242-11e9-bd6e-667e8fbec69d off the node scw-sono13-default-71171af685174eada6c25c1541e +STEP: verifying the node doesn't have the label kubernetes.io/e2e-c5505af4-8242-11e9-bd6e-667e8fbec69d +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:51:34.244: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-sched-pred-6bjw6" for this suite. +May 29 18:51:46.273: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:51:46.401: INFO: namespace: e2e-tests-sched-pred-6bjw6, resource: bindings, ignored listing per whitelist +May 29 18:51:46.588: INFO: namespace e2e-tests-sched-pred-6bjw6 deletion completed in 12.336444097s +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70 + +• [SLOW TEST:20.831 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22 + validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:51:46.588: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename gc +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-8gxx4 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not delete dependents 
that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc1 +STEP: create the rc2 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well +STEP: delete the rc simpletest-rc-to-be-deleted +STEP: wait for the rc to be deleted +STEP: Gathering metrics +W0529 18:51:57.013760 19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +May 29 18:51:57.013: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:51:57.013: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-8gxx4" for this suite. 
+May 29 18:52:03.050: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:52:03.124: INFO: namespace: e2e-tests-gc-8gxx4, resource: bindings, ignored listing per whitelist +May 29 18:52:03.333: INFO: namespace e2e-tests-gc-8gxx4 deletion completed in 6.31381752s + +• [SLOW TEST:16.745 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] Projected downwardAPI + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:52:03.333: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-cq67s +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 29 18:52:03.689: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-cq67s" to be "success or failure" +May 29 18:52:03.696: INFO: Pod "downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.636084ms +May 29 18:52:05.704: INFO: Pod "downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014280695s +May 29 18:52:07.711: INFO: Pod "downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.021118994s +STEP: Saw pod success +May 29 18:52:07.711: INFO: Pod "downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:52:07.717: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d container client-container: +STEP: delete the pod +May 29 18:52:07.744: INFO: Waiting for pod downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:52:07.750: INFO: Pod downwardapi-volume-d94ed6ac-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:52:07.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-cq67s" for this suite. +May 29 18:52:13.793: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:52:13.890: INFO: namespace: e2e-tests-projected-cq67s, resource: bindings, ignored listing per whitelist +May 29 18:52:14.079: INFO: namespace e2e-tests-projected-cq67s deletion completed in 6.317406937s + +• [SLOW TEST:10.745 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:52:14.079: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename projected +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-c65q8 +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-map-dfb01e2c-8242-11e9-bd6e-667e8fbec69d +STEP: Creating a pod to test consume configMaps +May 29 18:52:14.400: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-c65q8" to be "success or failure" +May 29 18:52:14.408: INFO: Pod "pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.609746ms +May 29 18:52:16.418: INFO: Pod "pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. 
Elapsed: 2.017748908s +May 29 18:52:18.425: INFO: Pod "pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025067726s +STEP: Saw pod success +May 29 18:52:18.425: INFO: Pod "pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure" +May 29 18:52:18.431: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: +STEP: delete the pod +May 29 18:52:18.457: INFO: Waiting for pod pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d to disappear +May 29 18:52:18.462: INFO: Pod pod-projected-configmaps-dfb136e8-8242-11e9-bd6e-667e8fbec69d no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 29 18:52:18.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-c65q8" for this suite. +May 29 18:52:24.506: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 29 18:52:24.552: INFO: namespace: e2e-tests-projected-c65q8, resource: bindings, ignored listing per whitelist +May 29 18:52:24.769: INFO: namespace e2e-tests-projected-c65q8 deletion completed in 6.298893492s + +• [SLOW TEST:10.690 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] version v1 + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 29 18:52:24.769: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334 +STEP: Building a namespace api object, basename proxy +STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-4gj7s +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy logs on node with explicit kubelet port using proxy subresource [Conformance] + /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 29 18:52:25.071: INFO: (0) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/:
+alternatives.log
+apt/
+... (200; 13.804147ms)
+May 29 18:52:25.080: INFO: (1) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.84952ms)
+May 29 18:52:25.088: INFO: (2) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.631627ms)
+May 29 18:52:25.096: INFO: (3) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.028864ms)
+May 29 18:52:25.105: INFO: (4) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.222161ms)
+May 29 18:52:25.114: INFO: (5) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.252696ms)
+May 29 18:52:25.123: INFO: (6) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.336256ms)
+May 29 18:52:25.131: INFO: (7) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.354878ms)
+May 29 18:52:25.141: INFO: (8) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.197705ms)
+May 29 18:52:25.150: INFO: (9) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.858897ms)
+May 29 18:52:25.158: INFO: (10) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.652339ms)
+May 29 18:52:25.170: INFO: (11) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 11.350588ms)
+May 29 18:52:25.180: INFO: (12) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 10.152343ms)
+May 29 18:52:25.188: INFO: (13) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.119686ms)
+May 29 18:52:25.196: INFO: (14) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.081774ms)
+May 29 18:52:25.205: INFO: (15) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.291222ms)
+May 29 18:52:25.215: INFO: (16) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.253741ms)
+May 29 18:52:25.224: INFO: (17) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.23179ms)
+May 29 18:52:25.232: INFO: (18) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.18627ms)
+May 29 18:52:25.240: INFO: (19) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2:10250/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.796447ms)
+[AfterEach] version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:52:25.240: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-proxy-4gj7s" for this suite.
+May 29 18:52:31.269: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:52:31.394: INFO: namespace: e2e-tests-proxy-4gj7s, resource: bindings, ignored listing per whitelist
+May 29 18:52:31.563: INFO: namespace e2e-tests-proxy-4gj7s deletion completed in 6.315975037s
+
+• [SLOW TEST:6.794 seconds]
+[sig-network] Proxy
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:56
+    should proxy logs on node with explicit kubelet port using proxy subresource  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:52:31.563: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-jdqrz
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-ea1637d1-8242-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 18:52:31.848: INFO: Waiting up to 5m0s for pod "pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-jdqrz" to be "success or failure"
+May 29 18:52:31.853: INFO: Pod "pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.838701ms
+May 29 18:52:33.861: INFO: Pod "pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013543515s
+May 29 18:52:35.875: INFO: Pod "pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027925425s
+STEP: Saw pod success
+May 29 18:52:35.875: INFO: Pod "pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 18:52:35.881: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d container configmap-volume-test: 
+STEP: delete the pod
+May 29 18:52:35.912: INFO: Waiting for pod pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d to disappear
+May 29 18:52:35.917: INFO: Pod pod-configmaps-ea1779c1-8242-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:52:35.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-jdqrz" for this suite.
+May 29 18:52:41.948: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:52:42.096: INFO: namespace: e2e-tests-configmap-jdqrz, resource: bindings, ignored listing per whitelist
+May 29 18:52:42.221: INFO: namespace e2e-tests-configmap-jdqrz deletion completed in 6.297749602s
+
+• [SLOW TEST:10.658 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:52:42.221: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-4bq6n
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should have an terminated reason [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:52:46.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-4bq6n" for this suite.
+May 29 18:52:52.564: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:52:52.622: INFO: namespace: e2e-tests-kubelet-test-4bq6n, resource: bindings, ignored listing per whitelist
+May 29 18:52:52.794: INFO: namespace e2e-tests-kubelet-test-4bq6n deletion completed in 6.251088074s
+
+• [SLOW TEST:10.572 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should have an terminated reason [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:52:52.794: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-mcs5r
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test override arguments
+May 29 18:52:53.123: INFO: Waiting up to 5m0s for pod "client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-containers-mcs5r" to be "success or failure"
+May 29 18:52:53.130: INFO: Pod "client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.855213ms
+May 29 18:52:55.138: INFO: Pod "client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014411145s
+May 29 18:52:57.154: INFO: Pod "client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030206734s
+STEP: Saw pod success
+May 29 18:52:57.154: INFO: Pod "client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 18:52:57.159: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 18:52:57.186: INFO: Waiting for pod client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d to disappear
+May 29 18:52:57.192: INFO: Pod client-containers-f6c5ce6e-8242-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:52:57.192: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-mcs5r" for this suite.
+May 29 18:53:03.222: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:53:03.352: INFO: namespace: e2e-tests-containers-mcs5r, resource: bindings, ignored listing per whitelist
+May 29 18:53:03.602: INFO: namespace e2e-tests-containers-mcs5r deletion completed in 6.402296013s
+
+• [SLOW TEST:10.808 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl version 
+  should check is all data is printed  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:53:03.602: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-zg9wn
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should check is all data is printed  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 18:53:03.871: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 version'
+May 29 18:53:04.020: INFO: stderr: ""
+May 29 18:53:04.020: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.0\", GitCommit:\"ddf47ac13c1a9483ea035a79cd7c10005ff21a6d\", GitTreeState:\"clean\", BuildDate:\"2018-12-03T21:04:45Z\", GoVersion:\"go1.11.2\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.1\", GitCommit:\"eec55b9ba98609a46fee712359c7b5b365bdd920\", GitTreeState:\"clean\", BuildDate:\"2018-12-13T10:31:33Z\", GoVersion:\"go1.11.2\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:53:04.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-zg9wn" for this suite.
+May 29 18:53:10.050: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:53:10.086: INFO: namespace: e2e-tests-kubectl-zg9wn, resource: bindings, ignored listing per whitelist
+May 29 18:53:10.311: INFO: namespace e2e-tests-kubectl-zg9wn deletion completed in 6.281725425s
+
+• [SLOW TEST:6.709 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl version
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should check is all data is printed  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:53:10.317: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-fhlhf
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+May 29 18:53:10.626: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+May 29 18:53:10.640: INFO: Waiting for terminating namespaces to be deleted...
+May 29 18:53:10.645: INFO: 
+Logging pods the kubelet thinks is on node scw-sono13-default-2865dd8133304358ae8da697bb2 before test
+May 29 18:53:10.664: INFO: monitoring-influxdb-7c84bfcfc8-snwmn from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container influxdb ready: true, restart count 0
+May 29 18:53:10.664: INFO: metrics-server-794596bd9d-x9dz9 from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container metrics-server ready: true, restart count 0
+May 29 18:53:10.664: INFO: node-problem-detector-6bkln from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container node-problem-detector ready: true, restart count 0
+May 29 18:53:10.664: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-krxc2 from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+May 29 18:53:10.664: INFO: 	Container systemd-logs ready: true, restart count 0
+May 29 18:53:10.664: INFO: kubernetes-dashboard-794fb6974c-d7btd from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+May 29 18:53:10.664: INFO: kube-proxy-s4qs6 from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container kube-proxy ready: true, restart count 0
+May 29 18:53:10.664: INFO: coredns-59b5b6c955-ssfxs from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container coredns ready: true, restart count 0
+May 29 18:53:10.664: INFO: flannel-nnv2c from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container kube-flannel ready: true, restart count 0
+May 29 18:53:10.664: INFO: heapster-d8d4579b6-fzrlt from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.664: INFO: 	Container heapster ready: true, restart count 0
+May 29 18:53:10.664: INFO: 
+Logging pods the kubelet thinks is on node scw-sono13-default-71171af685174eada6c25c1541e before test
+May 29 18:53:10.678: INFO: node-problem-detector-lbd8v from kube-system started at 2019-05-29 18:14:33 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.678: INFO: 	Container node-problem-detector ready: true, restart count 0
+May 29 18:53:10.678: INFO: kube-proxy-7jxzv from kube-system started at 2019-05-29 18:14:13 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.678: INFO: 	Container kube-proxy ready: true, restart count 0
+May 29 18:53:10.678: INFO: flannel-8bs82 from kube-system started at 2019-05-29 18:14:14 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.678: INFO: 	Container kube-flannel ready: true, restart count 0
+May 29 18:53:10.678: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-29 18:15:07 +0000 UTC (1 container statuses recorded)
+May 29 18:53:10.678: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+May 29 18:53:10.678: INFO: sonobuoy-e2e-job-721690eaa8df4a4c from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 18:53:10.679: INFO: 	Container e2e ready: true, restart count 0
+May 29 18:53:10.679: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+May 29 18:53:10.679: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-ct67t from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 18:53:10.679: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+May 29 18:53:10.679: INFO: 	Container systemd-logs ready: true, restart count 0
+[It] validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Trying to schedule Pod with nonempty NodeSelector.
+STEP: Considering event: 
+Type = [Warning], Name = [restricted-pod.15a33c1cdfd9d24f], Reason = [FailedScheduling], Message = [0/2 nodes are available: 2 node(s) didn't match node selector.]
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:53:11.721: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-fhlhf" for this suite.
+May 29 18:53:17.751: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:53:17.834: INFO: namespace: e2e-tests-sched-pred-fhlhf, resource: bindings, ignored listing per whitelist
+May 29 18:53:17.998: INFO: namespace e2e-tests-sched-pred-fhlhf deletion completed in 6.268286159s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:7.681 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with downward pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:53:17.998: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-8hpxw
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with downward pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-downwardapi-9pdq
+STEP: Creating a pod to test atomic-volume-subpath
+May 29 18:53:18.325: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-9pdq" in namespace "e2e-tests-subpath-8hpxw" to be "success or failure"
+May 29 18:53:18.334: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Pending", Reason="", readiness=false. Elapsed: 9.221648ms
+May 29 18:53:20.343: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017829564s
+May 29 18:53:22.350: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 4.024932626s
+May 29 18:53:24.357: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 6.031910299s
+May 29 18:53:26.365: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 8.040069314s
+May 29 18:53:28.380: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 10.054930163s
+May 29 18:53:30.390: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 12.065471587s
+May 29 18:53:32.397: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 14.072492595s
+May 29 18:53:34.405: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 16.080160389s
+May 29 18:53:36.413: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 18.087819756s
+May 29 18:53:38.431: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 20.10573857s
+May 29 18:53:40.438: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Running", Reason="", readiness=false. Elapsed: 22.112812071s
+May 29 18:53:42.445: INFO: Pod "pod-subpath-test-downwardapi-9pdq": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.120497031s
+STEP: Saw pod success
+May 29 18:53:42.445: INFO: Pod "pod-subpath-test-downwardapi-9pdq" satisfied condition "success or failure"
+May 29 18:53:42.452: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-subpath-test-downwardapi-9pdq container test-container-subpath-downwardapi-9pdq: 
+STEP: delete the pod
+May 29 18:53:42.483: INFO: Waiting for pod pod-subpath-test-downwardapi-9pdq to disappear
+May 29 18:53:42.491: INFO: Pod pod-subpath-test-downwardapi-9pdq no longer exists
+STEP: Deleting pod pod-subpath-test-downwardapi-9pdq
+May 29 18:53:42.491: INFO: Deleting pod "pod-subpath-test-downwardapi-9pdq" in namespace "e2e-tests-subpath-8hpxw"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:53:42.498: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-8hpxw" for this suite.
+May 29 18:53:48.540: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:53:48.723: INFO: namespace: e2e-tests-subpath-8hpxw, resource: bindings, ignored listing per whitelist
+May 29 18:53:48.813: INFO: namespace e2e-tests-subpath-8hpxw deletion completed in 6.307443086s
+
+• [SLOW TEST:30.815 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with downward pod [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:53:48.813: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-jdzf6
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-jdzf6
+May 29 18:53:53.157: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-jdzf6
+STEP: checking the pod's current state and verifying that restartCount is present
+May 29 18:53:53.163: INFO: Initial restart count of pod liveness-http is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:57:54.399: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-jdzf6" for this suite.
+May 29 18:58:00.435: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:58:00.510: INFO: namespace: e2e-tests-container-probe-jdzf6, resource: bindings, ignored listing per whitelist
+May 29 18:58:00.740: INFO: namespace e2e-tests-container-probe-jdzf6 deletion completed in 6.334033534s
+
+• [SLOW TEST:251.927 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition 
+  creating/deleting custom resource definition objects works  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:58:00.741: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename custom-resource-definition
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-custom-resource-definition-9ws2m
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] creating/deleting custom resource definition objects works  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 18:58:01.060: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+[AfterEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:58:02.176: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-custom-resource-definition-9ws2m" for this suite.
+May 29 18:58:08.206: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:58:08.325: INFO: namespace: e2e-tests-custom-resource-definition-9ws2m, resource: bindings, ignored listing per whitelist
+May 29 18:58:08.493: INFO: namespace e2e-tests-custom-resource-definition-9ws2m deletion completed in 6.308605726s
+
+• [SLOW TEST:7.752 seconds]
+[sig-api-machinery] CustomResourceDefinition resources
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  Simple CustomResourceDefinition
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35
+    creating/deleting custom resource definition objects works  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:58:08.493: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-8tsbv
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+May 29 18:58:16.875: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:16.881: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:18.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:18.889: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:20.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:20.896: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:22.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:22.889: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:24.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:24.889: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:26.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:26.888: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:28.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:28.888: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:30.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:30.888: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:32.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:32.898: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:34.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:34.889: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:36.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:36.888: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:38.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:38.889: INFO: Pod pod-with-prestop-exec-hook still exists
+May 29 18:58:40.881: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+May 29 18:58:40.888: INFO: Pod pod-with-prestop-exec-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:58:40.907: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-8tsbv" for this suite.
+May 29 18:59:02.941: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:59:03.087: INFO: namespace: e2e-tests-container-lifecycle-hook-8tsbv, resource: bindings, ignored listing per whitelist
+May 29 18:59:03.228: INFO: namespace e2e-tests-container-lifecycle-hook-8tsbv deletion completed in 22.313736453s
+
+• [SLOW TEST:54.735 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute prestop exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:59:03.229: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-wjtj2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0529 18:59:09.609084      19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May 29 18:59:09.609: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:59:09.609: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-wjtj2" for this suite.
+May 29 18:59:15.639: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:59:15.856: INFO: namespace: e2e-tests-gc-wjtj2, resource: bindings, ignored listing per whitelist
+May 29 18:59:15.928: INFO: namespace e2e-tests-gc-wjtj2 deletion completed in 6.311596168s
+
+• [SLOW TEST:12.699 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:59:15.928: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-gcd5h
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with configmap pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-configmap-h28b
+STEP: Creating a pod to test atomic-volume-subpath
+May 29 18:59:16.238: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-h28b" in namespace "e2e-tests-subpath-gcd5h" to be "success or failure"
+May 29 18:59:16.246: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Pending", Reason="", readiness=false. Elapsed: 7.586599ms
+May 29 18:59:18.254: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015404584s
+May 29 18:59:20.271: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 4.033018281s
+May 29 18:59:22.279: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 6.040707491s
+May 29 18:59:24.286: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 8.04774776s
+May 29 18:59:26.293: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 10.055030764s
+May 29 18:59:28.301: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 12.063021118s
+May 29 18:59:30.318: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 14.079812985s
+May 29 18:59:32.326: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 16.087309469s
+May 29 18:59:34.333: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 18.094990758s
+May 29 18:59:36.344: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 20.105556003s
+May 29 18:59:38.352: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Running", Reason="", readiness=false. Elapsed: 22.113695063s
+May 29 18:59:40.368: INFO: Pod "pod-subpath-test-configmap-h28b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.129513655s
+STEP: Saw pod success
+May 29 18:59:40.368: INFO: Pod "pod-subpath-test-configmap-h28b" satisfied condition "success or failure"
+May 29 18:59:40.375: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-subpath-test-configmap-h28b container test-container-subpath-configmap-h28b: 
+STEP: delete the pod
+May 29 18:59:40.406: INFO: Waiting for pod pod-subpath-test-configmap-h28b to disappear
+May 29 18:59:40.413: INFO: Pod pod-subpath-test-configmap-h28b no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-h28b
+May 29 18:59:40.413: INFO: Deleting pod "pod-subpath-test-configmap-h28b" in namespace "e2e-tests-subpath-gcd5h"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:59:40.419: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-gcd5h" for this suite.
+May 29 18:59:46.454: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:59:46.674: INFO: namespace: e2e-tests-subpath-gcd5h, resource: bindings, ignored listing per whitelist
+May 29 18:59:46.776: INFO: namespace e2e-tests-subpath-gcd5h deletion completed in 6.347711871s
+
+• [SLOW TEST:30.848 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with configmap pod [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:59:46.777: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-56h4z
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 18:59:47.112: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-56h4z" to be "success or failure"
+May 29 18:59:47.118: INFO: Pod "downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.477784ms
+May 29 18:59:49.130: INFO: Pod "downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018502836s
+May 29 18:59:51.145: INFO: Pod "downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.032989639s
+STEP: Saw pod success
+May 29 18:59:51.145: INFO: Pod "downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 18:59:51.151: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 18:59:51.192: INFO: Waiting for pod downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d to disappear
+May 29 18:59:51.199: INFO: Pod downwardapi-volume-ed872d73-8243-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:59:51.199: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-56h4z" for this suite.
+May 29 18:59:57.230: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 18:59:57.352: INFO: namespace: e2e-tests-projected-56h4z, resource: bindings, ignored listing per whitelist
+May 29 18:59:57.551: INFO: namespace e2e-tests-projected-56h4z deletion completed in 6.343903965s
+
+• [SLOW TEST:10.774 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run default 
+  should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 18:59:57.551: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-z2wvn
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1262
+[It] should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May 29 18:59:57.880: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-z2wvn'
+May 29 18:59:58.562: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May 29 18:59:58.562: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n"
+STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created
+[AfterEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1268
+May 29 18:59:58.569: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-z2wvn'
+May 29 18:59:58.714: INFO: stderr: ""
+May 29 18:59:58.714: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 18:59:58.714: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-z2wvn" for this suite.
+May 29 19:00:04.743: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:00:04.944: INFO: namespace: e2e-tests-kubectl-z2wvn, resource: bindings, ignored listing per whitelist
+May 29 19:00:05.074: INFO: namespace e2e-tests-kubectl-z2wvn deletion completed in 6.352476336s
+
+• [SLOW TEST:7.523 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create an rc or deployment from an image  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[k8s.io] [sig-node] Events 
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:00:05.074: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename events
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-events-h9g4f
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: retrieving the pod
+May 29 19:00:09.481: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-f8748cba-8243-11e9-bd6e-667e8fbec69d,GenerateName:,Namespace:e2e-tests-events-h9g4f,SelfLink:/api/v1/namespaces/e2e-tests-events-h9g4f/pods/send-events-f8748cba-8243-11e9-bd6e-667e8fbec69d,UID:f875b454-8243-11e9-9b18-c2b4512ea1b9,ResourceVersion:948889653,Generation:0,CreationTimestamp:2019-05-29 19:00:05 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 431834059,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-b5h4g {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-b5h4g,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] []  [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-b5h4g true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001797cb0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001797cd0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:00:05 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:00:07 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:00:07 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:00:05 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.108,StartTime:2019-05-29 19:00:05 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-05-29 19:00:06 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://gcr.io/kubernetes-e2e-test-images/serve-hostname@sha256:bab70473a6d8ef65a22625dc9a1b0f0452e811530fdbe77e4408523460177ff1 docker://f1f2b917c1db03c3e92f3271f1f308914b5605eaf7f791845ae649c24cda3673}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+
+STEP: checking for scheduler event about the pod
+May 29 19:00:11.489: INFO: Saw scheduler event for our pod.
+STEP: checking for kubelet event about the pod
+May 29 19:00:13.507: INFO: Saw kubelet event for our pod.
+STEP: deleting the pod
+[AfterEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:00:13.518: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-events-h9g4f" for this suite.
+May 29 19:00:55.552: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:00:55.660: INFO: namespace: e2e-tests-events-h9g4f, resource: bindings, ignored listing per whitelist
+May 29 19:00:55.867: INFO: namespace e2e-tests-events-h9g4f deletion completed in 42.338589557s
+
+• [SLOW TEST:50.793 seconds]
+[k8s.io] [sig-node] Events
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:00:55.868: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-pq9g4
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on node default medium
+May 29 19:00:56.161: INFO: Waiting up to 5m0s for pod "pod-16af51cc-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-pq9g4" to be "success or failure"
+May 29 19:00:56.167: INFO: Pod "pod-16af51cc-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.015522ms
+May 29 19:00:58.185: INFO: Pod "pod-16af51cc-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023781216s
+May 29 19:01:00.192: INFO: Pod "pod-16af51cc-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030582952s
+STEP: Saw pod success
+May 29 19:01:00.192: INFO: Pod "pod-16af51cc-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:01:00.198: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-16af51cc-8244-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:01:00.226: INFO: Waiting for pod pod-16af51cc-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:01:00.231: INFO: Pod pod-16af51cc-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:01:00.231: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-pq9g4" for this suite.
+May 29 19:01:06.267: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:01:06.620: INFO: namespace: e2e-tests-emptydir-pq9g4, resource: bindings, ignored listing per whitelist
+May 29 19:01:07.084: INFO: namespace e2e-tests-emptydir-pq9g4 deletion completed in 6.842314803s
+
+• [SLOW TEST:11.217 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with secret pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:01:07.085: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-qjp8r
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with secret pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-secret-gw9q
+STEP: Creating a pod to test atomic-volume-subpath
+May 29 19:01:07.435: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-gw9q" in namespace "e2e-tests-subpath-qjp8r" to be "success or failure"
+May 29 19:01:07.444: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Pending", Reason="", readiness=false. Elapsed: 8.687307ms
+May 29 19:01:09.460: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025249557s
+May 29 19:01:11.467: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 4.032394749s
+May 29 19:01:13.475: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 6.040579737s
+May 29 19:01:15.483: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 8.047853232s
+May 29 19:01:17.490: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 10.055277903s
+May 29 19:01:19.507: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 12.072399321s
+May 29 19:01:21.515: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 14.080424042s
+May 29 19:01:23.523: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 16.088101472s
+May 29 19:01:25.531: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 18.096289504s
+May 29 19:01:27.539: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 20.104223811s
+May 29 19:01:29.556: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Running", Reason="", readiness=false. Elapsed: 22.121145839s
+May 29 19:01:31.563: INFO: Pod "pod-subpath-test-secret-gw9q": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.128184163s
+STEP: Saw pod success
+May 29 19:01:31.563: INFO: Pod "pod-subpath-test-secret-gw9q" satisfied condition "success or failure"
+May 29 19:01:31.570: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod pod-subpath-test-secret-gw9q container test-container-subpath-secret-gw9q: 
+STEP: delete the pod
+May 29 19:01:31.634: INFO: Waiting for pod pod-subpath-test-secret-gw9q to disappear
+May 29 19:01:31.639: INFO: Pod pod-subpath-test-secret-gw9q no longer exists
+STEP: Deleting pod pod-subpath-test-secret-gw9q
+May 29 19:01:31.639: INFO: Deleting pod "pod-subpath-test-secret-gw9q" in namespace "e2e-tests-subpath-qjp8r"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:01:31.645: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-qjp8r" for this suite.
+May 29 19:01:37.675: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:01:37.863: INFO: namespace: e2e-tests-subpath-qjp8r, resource: bindings, ignored listing per whitelist
+May 29 19:01:37.958: INFO: namespace e2e-tests-subpath-qjp8r deletion completed in 6.30488591s
+
+• [SLOW TEST:30.873 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with secret pod [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:01:37.958: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-mv9sj
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0644 on node default medium
+May 29 19:01:38.259: INFO: Waiting up to 5m0s for pod "pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-mv9sj" to be "success or failure"
+May 29 19:01:38.265: INFO: Pod "pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.863125ms
+May 29 19:01:40.281: INFO: Pod "pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021608405s
+May 29 19:01:42.288: INFO: Pod "pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028980743s
+STEP: Saw pod success
+May 29 19:01:42.288: INFO: Pod "pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:01:42.294: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:01:42.322: INFO: Waiting for pod pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:01:42.328: INFO: Pod pod-2fc75aab-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:01:42.328: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-mv9sj" for this suite.
+May 29 19:01:48.366: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:01:48.479: INFO: namespace: e2e-tests-emptydir-mv9sj, resource: bindings, ignored listing per whitelist
+May 29 19:01:48.671: INFO: namespace e2e-tests-emptydir-mv9sj deletion completed in 6.334768613s
+
+• [SLOW TEST:10.713 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] HostPath 
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:01:48.671: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename hostpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-hostpath-fkw5w
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37
+[It] should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test hostPath mode
+May 29 19:01:48.957: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "e2e-tests-hostpath-fkw5w" to be "success or failure"
+May 29 19:01:48.965: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 8.273699ms
+May 29 19:01:50.985: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027376268s
+May 29 19:01:52.991: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.034267045s
+STEP: Saw pod success
+May 29 19:01:52.991: INFO: Pod "pod-host-path-test" satisfied condition "success or failure"
+May 29 19:01:52.998: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod pod-host-path-test container test-container-1: 
+STEP: delete the pod
+May 29 19:01:53.027: INFO: Waiting for pod pod-host-path-test to disappear
+May 29 19:01:53.033: INFO: Pod pod-host-path-test no longer exists
+[AfterEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:01:53.033: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-hostpath-fkw5w" for this suite.
+May 29 19:01:59.064: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:01:59.165: INFO: namespace: e2e-tests-hostpath-fkw5w, resource: bindings, ignored listing per whitelist
+May 29 19:01:59.304: INFO: namespace e2e-tests-hostpath-fkw5w deletion completed in 6.260853572s
+
+• [SLOW TEST:10.633 seconds]
+[sig-storage] HostPath
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:01:59.305: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-tkg5v
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for all rs to be garbage collected
+STEP: expected 0 pods, got 2 pods
+STEP: Gathering metrics
+W0529 19:02:00.655380      19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May 29 19:02:00.655: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:02:00.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-tkg5v" for this suite.
+May 29 19:02:06.689: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:02:06.879: INFO: namespace: e2e-tests-gc-tkg5v, resource: bindings, ignored listing per whitelist
+May 29 19:02:06.911: INFO: namespace e2e-tests-gc-tkg5v deletion completed in 6.249024655s
+
+• [SLOW TEST:7.607 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:02:06.912: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-7zpsd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-4108776e-8244-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:02:07.214: INFO: Waiting up to 5m0s for pod "pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-7zpsd" to be "success or failure"
+May 29 19:02:07.220: INFO: Pod "pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.270029ms
+May 29 19:02:09.227: INFO: Pod "pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012729111s
+May 29 19:02:11.233: INFO: Pod "pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019087986s
+STEP: Saw pod success
+May 29 19:02:11.233: INFO: Pod "pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:02:11.238: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d container secret-volume-test: 
+STEP: delete the pod
+May 29 19:02:11.270: INFO: Waiting for pod pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:02:11.277: INFO: Pod pod-secrets-41098140-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:02:11.277: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-7zpsd" for this suite.
+May 29 19:02:17.306: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:02:17.337: INFO: namespace: e2e-tests-secrets-7zpsd, resource: bindings, ignored listing per whitelist
+May 29 19:02:17.542: INFO: namespace e2e-tests-secrets-7zpsd deletion completed in 6.256921305s
+
+• [SLOW TEST:10.631 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:02:17.545: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-knmgb
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+May 29 19:02:25.884: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:25.890: INFO: Pod pod-with-prestop-http-hook still exists
+May 29 19:02:27.890: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:27.898: INFO: Pod pod-with-prestop-http-hook still exists
+May 29 19:02:29.890: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:29.922: INFO: Pod pod-with-prestop-http-hook still exists
+May 29 19:02:31.890: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:31.920: INFO: Pod pod-with-prestop-http-hook still exists
+May 29 19:02:33.890: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:33.907: INFO: Pod pod-with-prestop-http-hook still exists
+May 29 19:02:35.890: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+May 29 19:02:35.897: INFO: Pod pod-with-prestop-http-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:02:35.910: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-knmgb" for this suite.
+May 29 19:02:57.942: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:02:58.191: INFO: namespace: e2e-tests-container-lifecycle-hook-knmgb, resource: bindings, ignored listing per whitelist
+May 29 19:02:58.250: INFO: namespace e2e-tests-container-lifecycle-hook-knmgb deletion completed in 22.33186306s
+
+• [SLOW TEST:40.705 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute prestop http hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:02:58.251: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-pqxbp
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-5fa651d7-8244-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:02:58.589: INFO: Waiting up to 5m0s for pod "pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-pqxbp" to be "success or failure"
+May 29 19:02:58.596: INFO: Pod "pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.639976ms
+May 29 19:03:00.604: INFO: Pod "pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014714743s
+May 29 19:03:02.614: INFO: Pod "pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025259247s
+STEP: Saw pod success
+May 29 19:03:02.614: INFO: Pod "pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:03:02.621: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d container configmap-volume-test: 
+STEP: delete the pod
+May 29 19:03:02.648: INFO: Waiting for pod pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:03:02.653: INFO: Pod pod-configmaps-5fa75f80-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:03:02.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-pqxbp" for this suite.
+May 29 19:03:08.687: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:03:08.824: INFO: namespace: e2e-tests-configmap-pqxbp, resource: bindings, ignored listing per whitelist
+May 29 19:03:08.926: INFO: namespace e2e-tests-configmap-pqxbp deletion completed in 6.265432526s
+
+• [SLOW TEST:10.675 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:03:08.926: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-8ncsz
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May 29 19:03:09.226: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:03:13.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-8ncsz" for this suite.
+May 29 19:03:35.559: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:03:35.598: INFO: namespace: e2e-tests-init-container-8ncsz, resource: bindings, ignored listing per whitelist
+May 29 19:03:35.863: INFO: namespace e2e-tests-init-container-8ncsz deletion completed in 22.325955574s
+
+• [SLOW TEST:26.937 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:03:35.864: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename replication-controller
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replication-controller-hchgf
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a Pod with a 'name' label pod-adoption is created
+STEP: When a replication controller with a matching selector is created
+STEP: Then the orphan pod is adopted
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:03:41.200: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replication-controller-hchgf" for this suite.
+May 29 19:04:03.247: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:04:03.556: INFO: namespace: e2e-tests-replication-controller-hchgf, resource: bindings, ignored listing per whitelist
+May 29 19:04:03.603: INFO: namespace e2e-tests-replication-controller-hchgf deletion completed in 22.393671682s
+
+• [SLOW TEST:27.739 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Downward API volume 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:04:03.603: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-9f9rx
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+May 29 19:04:08.486: INFO: Successfully updated pod "annotationupdate86985b20-8244-11e9-bd6e-667e8fbec69d"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:04:10.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-9f9rx" for this suite.
+May 29 19:04:32.550: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:04:32.799: INFO: namespace: e2e-tests-downward-api-9f9rx, resource: bindings, ignored listing per whitelist
+May 29 19:04:32.839: INFO: namespace e2e-tests-downward-api-9f9rx deletion completed in 22.310267587s
+
+• [SLOW TEST:29.236 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class 
+  should be submitted and removed  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:04:32.841: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-7gp9p
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:204
+[It] should be submitted and removed  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying QOS class is set on the pod
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:04:33.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-7gp9p" for this suite.
+May 29 19:04:55.192: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:04:55.265: INFO: namespace: e2e-tests-pods-7gp9p, resource: bindings, ignored listing per whitelist
+May 29 19:04:55.480: INFO: namespace e2e-tests-pods-7gp9p deletion completed in 22.314051887s
+
+• [SLOW TEST:22.639 seconds]
+[k8s.io] [sig-node] Pods Extended
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should be submitted and removed  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:04:55.480: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename namespaces
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-namespaces-2lllq
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-qtgdl
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a service in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-lv6pk
+STEP: Verifying there is no service in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:05:02.102: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-namespaces-2lllq" for this suite.
+May 29 19:05:08.135: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:05:08.824: INFO: namespace: e2e-tests-namespaces-2lllq, resource: bindings, ignored listing per whitelist
+May 29 19:05:08.978: INFO: namespace e2e-tests-namespaces-2lllq deletion completed in 6.864907763s
+STEP: Destroying namespace "e2e-tests-nsdeletetest-qtgdl" for this suite.
+May 29 19:05:08.985: INFO: Namespace e2e-tests-nsdeletetest-qtgdl was already deleted
+STEP: Destroying namespace "e2e-tests-nsdeletetest-lv6pk" for this suite.
+May 29 19:05:15.007: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:05:15.121: INFO: namespace: e2e-tests-nsdeletetest-lv6pk, resource: bindings, ignored listing per whitelist
+May 29 19:05:15.221: INFO: namespace e2e-tests-nsdeletetest-lv6pk deletion completed in 6.236920332s
+
+• [SLOW TEST:19.741 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:05:15.222: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename watch
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-g6hw7
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a watch on configmaps
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: closing the watch once it receives two notifications
+May 29 19:05:15.557: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-g6hw7,SelfLink:/api/v1/namespaces/e2e-tests-watch-g6hw7/configmaps/e2e-watch-test-watch-closed,UID:b14bad72-8244-11e9-9b18-c2b4512ea1b9,ResourceVersion:948913372,Generation:0,CreationTimestamp:2019-05-29 19:05:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},}
+May 29 19:05:15.558: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-g6hw7,SelfLink:/api/v1/namespaces/e2e-tests-watch-g6hw7/configmaps/e2e-watch-test-watch-closed,UID:b14bad72-8244-11e9-9b18-c2b4512ea1b9,ResourceVersion:948913375,Generation:0,CreationTimestamp:2019-05-29 19:05:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time, while the watch is closed
+STEP: creating a new watch on configmaps from the last resource version observed by the first watch
+STEP: deleting the configmap
+STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed
+May 29 19:05:15.600: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-g6hw7,SelfLink:/api/v1/namespaces/e2e-tests-watch-g6hw7/configmaps/e2e-watch-test-watch-closed,UID:b14bad72-8244-11e9-9b18-c2b4512ea1b9,ResourceVersion:948913377,Generation:0,CreationTimestamp:2019-05-29 19:05:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+May 29 19:05:15.600: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-g6hw7,SelfLink:/api/v1/namespaces/e2e-tests-watch-g6hw7/configmaps/e2e-watch-test-watch-closed,UID:b14bad72-8244-11e9-9b18-c2b4512ea1b9,ResourceVersion:948913381,Generation:0,CreationTimestamp:2019-05-29 19:05:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:05:15.600: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-watch-g6hw7" for this suite.
+May 29 19:05:21.641: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:05:21.900: INFO: namespace: e2e-tests-watch-g6hw7, resource: bindings, ignored listing per whitelist
+May 29 19:05:21.929: INFO: namespace e2e-tests-watch-g6hw7 deletion completed in 6.318190263s
+
+• [SLOW TEST:6.707 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:05:21.929: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-fk6sh
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test substitution in container's args
+May 29 19:05:22.217: INFO: Waiting up to 5m0s for pod "var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-var-expansion-fk6sh" to be "success or failure"
+May 29 19:05:22.224: INFO: Pod "var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.895715ms
+May 29 19:05:24.231: INFO: Pod "var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013494371s
+May 29 19:05:26.238: INFO: Pod "var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020347751s
+STEP: Saw pod success
+May 29 19:05:26.238: INFO: Pod "var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:05:26.244: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d container dapi-container: 
+STEP: delete the pod
+May 29 19:05:26.273: INFO: Waiting for pod var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:05:26.278: INFO: Pod var-expansion-b5448e92-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:05:26.278: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-fk6sh" for this suite.
+May 29 19:05:32.316: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:05:32.572: INFO: namespace: e2e-tests-var-expansion-fk6sh, resource: bindings, ignored listing per whitelist
+May 29 19:05:32.597: INFO: namespace e2e-tests-var-expansion-fk6sh deletion completed in 6.311249675s
+
+• [SLOW TEST:10.668 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow substituting values in a container's args [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:05:32.600: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename proxy
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-jvhl2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy through a service and a pod  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: starting an echo server on multiple ports
+STEP: creating replication controller proxy-service-dt27d in namespace e2e-tests-proxy-jvhl2
+I0529 19:05:32.898808      19 runners.go:184] Created replication controller with name: proxy-service-dt27d, namespace: e2e-tests-proxy-jvhl2, replica count: 1
+I0529 19:05:33.952484      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0529 19:05:34.952729      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0529 19:05:35.953083      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0529 19:05:36.953421      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:37.953740      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:38.955559      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:39.955808      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:40.956094      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:41.956392      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:42.956578      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:43.956777      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:44.956988      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady 
+I0529 19:05:45.957260      19 runners.go:184] proxy-service-dt27d Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+May 29 19:05:45.970: INFO: setup took 13.100597444s, starting test cases
+STEP: running 16 cases, 20 attempts per case, 320 total attempts
+May 29 19:05:46.021: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-jvhl2/pods/http:proxy-service-dt27d-qhhcp:1080/proxy/: >> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-4d6jk
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating service multi-endpoint-test in namespace e2e-tests-services-4d6jk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-4d6jk to expose endpoints map[]
+May 29 19:06:00.349: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-4d6jk exposes endpoints map[] (6.493727ms elapsed)
+STEP: Creating pod pod1 in namespace e2e-tests-services-4d6jk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-4d6jk to expose endpoints map[pod1:[100]]
+May 29 19:06:03.422: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-4d6jk exposes endpoints map[pod1:[100]] (3.060858494s elapsed)
+STEP: Creating pod pod2 in namespace e2e-tests-services-4d6jk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-4d6jk to expose endpoints map[pod2:[101] pod1:[100]]
+May 29 19:06:06.508: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-4d6jk exposes endpoints map[pod1:[100] pod2:[101]] (3.077661203s elapsed)
+STEP: Deleting pod pod1 in namespace e2e-tests-services-4d6jk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-4d6jk to expose endpoints map[pod2:[101]]
+May 29 19:06:06.531: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-4d6jk exposes endpoints map[pod2:[101]] (14.326404ms elapsed)
+STEP: Deleting pod pod2 in namespace e2e-tests-services-4d6jk
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-4d6jk to expose endpoints map[]
+May 29 19:06:06.548: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-4d6jk exposes endpoints map[] (6.086794ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:06:06.583: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-4d6jk" for this suite.
+May 29 19:06:28.614: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:06:28.759: INFO: namespace: e2e-tests-services-4d6jk, resource: bindings, ignored listing per whitelist
+May 29 19:06:28.852: INFO: namespace e2e-tests-services-4d6jk deletion completed in 22.259641316s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:28.804 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:06:28.852: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-k5g6d
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating secret e2e-tests-secrets-k5g6d/secret-test-dd3097d3-8244-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:06:29.202: INFO: Waiting up to 5m0s for pod "pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-k5g6d" to be "success or failure"
+May 29 19:06:29.210: INFO: Pod "pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.756642ms
+May 29 19:06:31.229: INFO: Pod "pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.027548073s
+May 29 19:06:33.237: INFO: Pod "pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.035246575s
+STEP: Saw pod success
+May 29 19:06:33.237: INFO: Pod "pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:06:33.244: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d container env-test: 
+STEP: delete the pod
+May 29 19:06:33.272: INFO: Waiting for pod pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:06:33.278: INFO: Pod pod-configmaps-dd317071-8244-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:06:33.278: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-k5g6d" for this suite.
+May 29 19:06:39.927: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:06:40.151: INFO: namespace: e2e-tests-secrets-k5g6d, resource: bindings, ignored listing per whitelist
+May 29 19:06:40.233: INFO: namespace e2e-tests-secrets-k5g6d deletion completed in 6.808514138s
+
+• [SLOW TEST:11.380 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-network] Service endpoints latency 
+  should not be very high  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:06:40.233: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename svc-latency
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svc-latency-gsgx2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be very high  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating replication controller svc-latency-rc in namespace e2e-tests-svc-latency-gsgx2
+I0529 19:06:40.544479      19 runners.go:184] Created replication controller with name: svc-latency-rc, namespace: e2e-tests-svc-latency-gsgx2, replica count: 1
+I0529 19:06:41.594841      19 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0529 19:06:42.595003      19 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0529 19:06:43.595209      19 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+May 29 19:06:43.717: INFO: Created: latency-svc-74h4c
+May 29 19:06:43.722: INFO: Got endpoints: latency-svc-74h4c [27.076334ms]
+May 29 19:06:43.742: INFO: Created: latency-svc-lpqgs
+May 29 19:06:43.747: INFO: Got endpoints: latency-svc-lpqgs [25.328889ms]
+May 29 19:06:43.753: INFO: Created: latency-svc-4xqnz
+May 29 19:06:43.759: INFO: Got endpoints: latency-svc-4xqnz [37.145935ms]
+May 29 19:06:43.765: INFO: Created: latency-svc-kk488
+May 29 19:06:43.771: INFO: Got endpoints: latency-svc-kk488 [48.794464ms]
+May 29 19:06:43.780: INFO: Created: latency-svc-wxnq8
+May 29 19:06:43.783: INFO: Got endpoints: latency-svc-wxnq8 [59.973141ms]
+May 29 19:06:43.822: INFO: Created: latency-svc-b5nwt
+May 29 19:06:43.827: INFO: Got endpoints: latency-svc-b5nwt [105.039916ms]
+May 29 19:06:43.836: INFO: Created: latency-svc-r7h9l
+May 29 19:06:43.840: INFO: Got endpoints: latency-svc-r7h9l [117.220165ms]
+May 29 19:06:43.849: INFO: Created: latency-svc-rs5cw
+May 29 19:06:43.906: INFO: Got endpoints: latency-svc-rs5cw [183.818902ms]
+May 29 19:06:43.916: INFO: Created: latency-svc-whmhz
+May 29 19:06:43.920: INFO: Got endpoints: latency-svc-whmhz [198.356022ms]
+May 29 19:06:43.930: INFO: Created: latency-svc-58bgq
+May 29 19:06:43.935: INFO: Got endpoints: latency-svc-58bgq [212.101154ms]
+May 29 19:06:43.941: INFO: Created: latency-svc-zqxd6
+May 29 19:06:43.944: INFO: Got endpoints: latency-svc-zqxd6 [222.015803ms]
+May 29 19:06:43.952: INFO: Created: latency-svc-ss7bd
+May 29 19:06:43.958: INFO: Got endpoints: latency-svc-ss7bd [235.601335ms]
+May 29 19:06:43.963: INFO: Created: latency-svc-6pwfx
+May 29 19:06:43.967: INFO: Got endpoints: latency-svc-6pwfx [244.425624ms]
+May 29 19:06:43.979: INFO: Created: latency-svc-nf8hs
+May 29 19:06:43.986: INFO: Got endpoints: latency-svc-nf8hs [263.091336ms]
+May 29 19:06:43.988: INFO: Created: latency-svc-9cws2
+May 29 19:06:44.025: INFO: Got endpoints: latency-svc-9cws2 [302.260445ms]
+May 29 19:06:44.031: INFO: Created: latency-svc-wwz9h
+May 29 19:06:44.031: INFO: Got endpoints: latency-svc-wwz9h [308.775863ms]
+May 29 19:06:44.039: INFO: Created: latency-svc-gwgnz
+May 29 19:06:44.043: INFO: Got endpoints: latency-svc-gwgnz [295.297754ms]
+May 29 19:06:44.051: INFO: Created: latency-svc-ghqz6
+May 29 19:06:44.055: INFO: Got endpoints: latency-svc-ghqz6 [295.592248ms]
+May 29 19:06:44.062: INFO: Created: latency-svc-5dzmv
+May 29 19:06:44.074: INFO: Created: latency-svc-bztl5
+May 29 19:06:44.074: INFO: Got endpoints: latency-svc-5dzmv [303.143655ms]
+May 29 19:06:44.122: INFO: Got endpoints: latency-svc-bztl5 [66.431212ms]
+May 29 19:06:44.131: INFO: Created: latency-svc-zjrm2
+May 29 19:06:44.135: INFO: Got endpoints: latency-svc-zjrm2 [352.336058ms]
+May 29 19:06:44.143: INFO: Created: latency-svc-9r2hw
+May 29 19:06:44.147: INFO: Got endpoints: latency-svc-9r2hw [319.591187ms]
+May 29 19:06:44.154: INFO: Created: latency-svc-4hw4l
+May 29 19:06:44.163: INFO: Got endpoints: latency-svc-4hw4l [323.439762ms]
+May 29 19:06:44.165: INFO: Created: latency-svc-lcmsb
+May 29 19:06:44.171: INFO: Got endpoints: latency-svc-lcmsb [263.575937ms]
+May 29 19:06:44.219: INFO: Created: latency-svc-gklws
+May 29 19:06:44.222: INFO: Got endpoints: latency-svc-gklws [301.37562ms]
+May 29 19:06:44.229: INFO: Created: latency-svc-2hxcp
+May 29 19:06:44.235: INFO: Got endpoints: latency-svc-2hxcp [300.589774ms]
+May 29 19:06:44.242: INFO: Created: latency-svc-x9rp6
+May 29 19:06:44.246: INFO: Got endpoints: latency-svc-x9rp6 [301.157209ms]
+May 29 19:06:44.253: INFO: Created: latency-svc-5q8hd
+May 29 19:06:44.257: INFO: Got endpoints: latency-svc-5q8hd [298.540199ms]
+May 29 19:06:44.264: INFO: Created: latency-svc-7thw9
+May 29 19:06:44.268: INFO: Got endpoints: latency-svc-7thw9 [301.401209ms]
+May 29 19:06:44.278: INFO: Created: latency-svc-6pxzz
+May 29 19:06:44.319: INFO: Got endpoints: latency-svc-6pxzz [333.532095ms]
+May 29 19:06:44.325: INFO: Created: latency-svc-4fbc6
+May 29 19:06:44.419: INFO: Got endpoints: latency-svc-4fbc6 [393.599874ms]
+May 29 19:06:44.423: INFO: Created: latency-svc-lbrz8
+May 29 19:06:44.429: INFO: Got endpoints: latency-svc-lbrz8 [397.88531ms]
+May 29 19:06:44.724: INFO: Created: latency-svc-5tlrh
+May 29 19:06:44.825: INFO: Got endpoints: latency-svc-5tlrh [782.288795ms]
+May 29 19:06:44.920: INFO: Created: latency-svc-ctrgg
+May 29 19:06:44.925: INFO: Got endpoints: latency-svc-ctrgg [850.677777ms]
+May 29 19:06:44.956: INFO: Created: latency-svc-vjcq7
+May 29 19:06:45.021: INFO: Got endpoints: latency-svc-vjcq7 [899.36533ms]
+May 29 19:06:45.029: INFO: Created: latency-svc-rzxp6
+May 29 19:06:45.034: INFO: Got endpoints: latency-svc-rzxp6 [898.969266ms]
+May 29 19:06:45.042: INFO: Created: latency-svc-pwzp9
+May 29 19:06:45.048: INFO: Got endpoints: latency-svc-pwzp9 [900.827179ms]
+May 29 19:06:45.054: INFO: Created: latency-svc-6ftmn
+May 29 19:06:45.059: INFO: Got endpoints: latency-svc-6ftmn [895.973705ms]
+May 29 19:06:45.073: INFO: Created: latency-svc-zhzp2
+May 29 19:06:45.073: INFO: Got endpoints: latency-svc-zhzp2 [902.467672ms]
+May 29 19:06:45.077: INFO: Created: latency-svc-s6z9x
+May 29 19:06:45.083: INFO: Got endpoints: latency-svc-s6z9x [860.293564ms]
+May 29 19:06:45.119: INFO: Created: latency-svc-qcz6r
+May 29 19:06:45.127: INFO: Got endpoints: latency-svc-qcz6r [891.917388ms]
+May 29 19:06:45.131: INFO: Created: latency-svc-69vsb
+May 29 19:06:45.137: INFO: Got endpoints: latency-svc-69vsb [890.949518ms]
+May 29 19:06:45.142: INFO: Created: latency-svc-m9g8m
+May 29 19:06:45.149: INFO: Got endpoints: latency-svc-m9g8m [891.967601ms]
+May 29 19:06:45.154: INFO: Created: latency-svc-fbmxn
+May 29 19:06:45.164: INFO: Got endpoints: latency-svc-fbmxn [895.373545ms]
+May 29 19:06:45.167: INFO: Created: latency-svc-x2kcl
+May 29 19:06:45.171: INFO: Got endpoints: latency-svc-x2kcl [852.016603ms]
+May 29 19:06:45.178: INFO: Created: latency-svc-zkw89
+May 29 19:06:45.222: INFO: Got endpoints: latency-svc-zkw89 [803.596473ms]
+May 29 19:06:45.226: INFO: Created: latency-svc-87kb9
+May 29 19:06:45.232: INFO: Got endpoints: latency-svc-87kb9 [802.668489ms]
+May 29 19:06:45.236: INFO: Created: latency-svc-hf67s
+May 29 19:06:45.240: INFO: Got endpoints: latency-svc-hf67s [414.67242ms]
+May 29 19:06:45.250: INFO: Created: latency-svc-xjc8j
+May 29 19:06:45.252: INFO: Got endpoints: latency-svc-xjc8j [327.235037ms]
+May 29 19:06:45.262: INFO: Created: latency-svc-h2qzg
+May 29 19:06:45.267: INFO: Got endpoints: latency-svc-h2qzg [245.823552ms]
+May 29 19:06:45.279: INFO: Created: latency-svc-lwp4s
+May 29 19:06:45.286: INFO: Got endpoints: latency-svc-lwp4s [251.665825ms]
+May 29 19:06:45.336: INFO: Created: latency-svc-4cn22
+May 29 19:06:45.342: INFO: Got endpoints: latency-svc-4cn22 [293.85447ms]
+May 29 19:06:45.347: INFO: Created: latency-svc-8clrl
+May 29 19:06:45.351: INFO: Got endpoints: latency-svc-8clrl [291.759041ms]
+May 29 19:06:45.358: INFO: Created: latency-svc-hwtcl
+May 29 19:06:45.364: INFO: Got endpoints: latency-svc-hwtcl [290.684281ms]
+May 29 19:06:45.370: INFO: Created: latency-svc-4s58q
+May 29 19:06:45.376: INFO: Got endpoints: latency-svc-4s58q [293.120931ms]
+May 29 19:06:45.382: INFO: Created: latency-svc-bgc47
+May 29 19:06:45.390: INFO: Got endpoints: latency-svc-bgc47 [262.980436ms]
+May 29 19:06:45.419: INFO: Created: latency-svc-kgksv
+May 29 19:06:45.423: INFO: Got endpoints: latency-svc-kgksv [286.020743ms]
+May 29 19:06:45.436: INFO: Created: latency-svc-6fb8q
+May 29 19:06:45.442: INFO: Got endpoints: latency-svc-6fb8q [293.4767ms]
+May 29 19:06:45.445: INFO: Created: latency-svc-z6gcq
+May 29 19:06:45.447: INFO: Got endpoints: latency-svc-z6gcq [283.000154ms]
+May 29 19:06:45.458: INFO: Created: latency-svc-m8jlb
+May 29 19:06:45.462: INFO: Got endpoints: latency-svc-m8jlb [290.52414ms]
+May 29 19:06:45.472: INFO: Created: latency-svc-55tsm
+May 29 19:06:45.474: INFO: Got endpoints: latency-svc-55tsm [251.151398ms]
+May 29 19:06:45.481: INFO: Created: latency-svc-shmmd
+May 29 19:06:45.523: INFO: Got endpoints: latency-svc-shmmd [290.826137ms]
+May 29 19:06:45.523: INFO: Created: latency-svc-7hmr7
+May 29 19:06:45.529: INFO: Got endpoints: latency-svc-7hmr7 [289.52936ms]
+May 29 19:06:45.537: INFO: Created: latency-svc-b5sxg
+May 29 19:06:45.539: INFO: Got endpoints: latency-svc-b5sxg [286.27429ms]
+May 29 19:06:45.546: INFO: Created: latency-svc-9svbc
+May 29 19:06:45.551: INFO: Got endpoints: latency-svc-9svbc [264.261085ms]
+May 29 19:06:45.559: INFO: Created: latency-svc-dt6kz
+May 29 19:06:45.564: INFO: Got endpoints: latency-svc-dt6kz [297.321559ms]
+May 29 19:06:45.570: INFO: Created: latency-svc-b5bmt
+May 29 19:06:45.575: INFO: Got endpoints: latency-svc-b5bmt [233.288155ms]
+May 29 19:06:45.619: INFO: Created: latency-svc-2j2cw
+May 29 19:06:45.624: INFO: Got endpoints: latency-svc-2j2cw [272.698326ms]
+May 29 19:06:45.654: INFO: Created: latency-svc-82bmn
+May 29 19:06:45.654: INFO: Created: latency-svc-hwvlg
+May 29 19:06:45.660: INFO: Created: latency-svc-h4gbz
+May 29 19:06:45.667: INFO: Created: latency-svc-wj8vb
+May 29 19:06:45.671: INFO: Got endpoints: latency-svc-hwvlg [307.079794ms]
+May 29 19:06:45.681: INFO: Created: latency-svc-sc9vq
+May 29 19:06:45.689: INFO: Created: latency-svc-przsd
+May 29 19:06:45.729: INFO: Got endpoints: latency-svc-82bmn [349.889306ms]
+May 29 19:06:45.732: INFO: Created: latency-svc-mdszp
+May 29 19:06:45.745: INFO: Created: latency-svc-zqkp9
+May 29 19:06:45.758: INFO: Created: latency-svc-m4xxb
+May 29 19:06:45.771: INFO: Created: latency-svc-2nqmd
+May 29 19:06:45.771: INFO: Got endpoints: latency-svc-h4gbz [378.405526ms]
+May 29 19:06:45.783: INFO: Created: latency-svc-x5cb7
+May 29 19:06:45.795: INFO: Created: latency-svc-l9xl4
+May 29 19:06:45.819: INFO: Created: latency-svc-5sgn5
+May 29 19:06:45.821: INFO: Created: latency-svc-84nnq
+May 29 19:06:45.822: INFO: Got endpoints: latency-svc-wj8vb [399.031164ms]
+May 29 19:06:45.838: INFO: Created: latency-svc-stb9q
+May 29 19:06:45.851: INFO: Created: latency-svc-h5jlb
+May 29 19:06:45.863: INFO: Created: latency-svc-5qgjf
+May 29 19:06:45.872: INFO: Got endpoints: latency-svc-sc9vq [429.636743ms]
+May 29 19:06:45.882: INFO: Created: latency-svc-94g5p
+May 29 19:06:45.886: INFO: Created: latency-svc-wxtl5
+May 29 19:06:45.931: INFO: Got endpoints: latency-svc-przsd [483.549409ms]
+May 29 19:06:45.936: INFO: Created: latency-svc-46q6s
+May 29 19:06:45.953: INFO: Created: latency-svc-26s5k
+May 29 19:06:45.973: INFO: Got endpoints: latency-svc-mdszp [509.962316ms]
+May 29 19:06:45.995: INFO: Created: latency-svc-8pw7x
+May 29 19:06:46.023: INFO: Got endpoints: latency-svc-zqkp9 [549.00645ms]
+May 29 19:06:46.044: INFO: Created: latency-svc-62k95
+May 29 19:06:46.072: INFO: Got endpoints: latency-svc-m4xxb [542.673293ms]
+May 29 19:06:46.094: INFO: Created: latency-svc-d6f9t
+May 29 19:06:46.122: INFO: Got endpoints: latency-svc-2nqmd [592.93159ms]
+May 29 19:06:46.142: INFO: Created: latency-svc-bsgp6
+May 29 19:06:46.171: INFO: Got endpoints: latency-svc-x5cb7 [632.529234ms]
+May 29 19:06:46.194: INFO: Created: latency-svc-qt6nt
+May 29 19:06:46.223: INFO: Got endpoints: latency-svc-l9xl4 [672.416532ms]
+May 29 19:06:46.246: INFO: Created: latency-svc-cbgjj
+May 29 19:06:46.273: INFO: Got endpoints: latency-svc-5sgn5 [708.782881ms]
+May 29 19:06:46.296: INFO: Created: latency-svc-gz8pc
+May 29 19:06:46.323: INFO: Got endpoints: latency-svc-84nnq [747.59568ms]
+May 29 19:06:46.345: INFO: Created: latency-svc-x8vgd
+May 29 19:06:46.373: INFO: Got endpoints: latency-svc-stb9q [748.766241ms]
+May 29 19:06:46.398: INFO: Created: latency-svc-6tlwx
+May 29 19:06:46.423: INFO: Got endpoints: latency-svc-h5jlb [751.426773ms]
+May 29 19:06:46.445: INFO: Created: latency-svc-fqb4j
+May 29 19:06:46.473: INFO: Got endpoints: latency-svc-5qgjf [744.708381ms]
+May 29 19:06:46.495: INFO: Created: latency-svc-d7qsc
+May 29 19:06:46.524: INFO: Got endpoints: latency-svc-94g5p [752.806175ms]
+May 29 19:06:46.545: INFO: Created: latency-svc-fjtr6
+May 29 19:06:46.572: INFO: Got endpoints: latency-svc-wxtl5 [750.184717ms]
+May 29 19:06:46.597: INFO: Created: latency-svc-lmktp
+May 29 19:06:46.624: INFO: Got endpoints: latency-svc-46q6s [751.943844ms]
+May 29 19:06:46.650: INFO: Created: latency-svc-f4jpj
+May 29 19:06:46.681: INFO: Got endpoints: latency-svc-26s5k [750.105799ms]
+May 29 19:06:46.704: INFO: Created: latency-svc-mb4gv
+May 29 19:06:46.723: INFO: Got endpoints: latency-svc-8pw7x [750.026399ms]
+May 29 19:06:46.746: INFO: Created: latency-svc-5rzwl
+May 29 19:06:46.778: INFO: Got endpoints: latency-svc-62k95 [754.864405ms]
+May 29 19:06:46.807: INFO: Created: latency-svc-gvnw5
+May 29 19:06:46.824: INFO: Got endpoints: latency-svc-d6f9t [752.289653ms]
+May 29 19:06:46.850: INFO: Created: latency-svc-vlfww
+May 29 19:06:46.872: INFO: Got endpoints: latency-svc-bsgp6 [750.195486ms]
+May 29 19:06:46.900: INFO: Created: latency-svc-dj77l
+May 29 19:06:46.922: INFO: Got endpoints: latency-svc-qt6nt [751.043542ms]
+May 29 19:06:46.945: INFO: Created: latency-svc-snj7d
+May 29 19:06:46.974: INFO: Got endpoints: latency-svc-cbgjj [750.997594ms]
+May 29 19:06:47.025: INFO: Got endpoints: latency-svc-gz8pc [751.679316ms]
+May 29 19:06:47.124: INFO: Got endpoints: latency-svc-x8vgd [801.023804ms]
+May 29 19:06:47.124: INFO: Got endpoints: latency-svc-6tlwx [750.934776ms]
+May 29 19:06:47.321: INFO: Got endpoints: latency-svc-d7qsc [847.27164ms]
+May 29 19:06:47.322: INFO: Got endpoints: latency-svc-fqb4j [898.918207ms]
+May 29 19:06:47.420: INFO: Got endpoints: latency-svc-lmktp [844.609987ms]
+May 29 19:06:47.420: INFO: Got endpoints: latency-svc-fjtr6 [896.035248ms]
+May 29 19:06:47.432: INFO: Got endpoints: latency-svc-mb4gv [751.145101ms]
+May 29 19:06:47.432: INFO: Got endpoints: latency-svc-f4jpj [808.019477ms]
+May 29 19:06:47.437: INFO: Created: latency-svc-v6f79
+May 29 19:06:47.526: INFO: Got endpoints: latency-svc-vlfww [699.32044ms]
+May 29 19:06:47.526: INFO: Got endpoints: latency-svc-5rzwl [802.987495ms]
+May 29 19:06:47.527: INFO: Created: latency-svc-w9q6v
+May 29 19:06:47.542: INFO: Created: latency-svc-vch6w
+May 29 19:06:47.558: INFO: Created: latency-svc-8dtqj
+May 29 19:06:47.572: INFO: Got endpoints: latency-svc-gvnw5 [791.627379ms]
+May 29 19:06:47.574: INFO: Created: latency-svc-dczjz
+May 29 19:06:47.621: INFO: Created: latency-svc-7j79h
+May 29 19:06:47.621: INFO: Got endpoints: latency-svc-dj77l [745.327825ms]
+May 29 19:06:47.630: INFO: Created: latency-svc-bvgz2
+May 29 19:06:47.643: INFO: Created: latency-svc-f7vjq
+May 29 19:06:47.655: INFO: Created: latency-svc-wkhg6
+May 29 19:06:47.669: INFO: Created: latency-svc-bqmh7
+May 29 19:06:47.674: INFO: Got endpoints: latency-svc-snj7d [751.539042ms]
+May 29 19:06:47.681: INFO: Created: latency-svc-lj9lh
+May 29 19:06:47.691: INFO: Created: latency-svc-wnrm9
+May 29 19:06:47.706: INFO: Created: latency-svc-fj8vx
+May 29 19:06:47.726: INFO: Created: latency-svc-54v7h
+May 29 19:06:47.733: INFO: Got endpoints: latency-svc-v6f79 [758.386264ms]
+May 29 19:06:47.736: INFO: Created: latency-svc-xrsmp
+May 29 19:06:47.753: INFO: Created: latency-svc-nlgjl
+May 29 19:06:47.773: INFO: Got endpoints: latency-svc-w9q6v [747.865926ms]
+May 29 19:06:47.793: INFO: Created: latency-svc-6pmp4
+May 29 19:06:47.827: INFO: Got endpoints: latency-svc-vch6w [702.323977ms]
+May 29 19:06:47.849: INFO: Created: latency-svc-dn4f4
+May 29 19:06:47.873: INFO: Got endpoints: latency-svc-8dtqj [749.068608ms]
+May 29 19:06:47.893: INFO: Created: latency-svc-r9lnq
+May 29 19:06:47.922: INFO: Got endpoints: latency-svc-dczjz [601.11869ms]
+May 29 19:06:47.942: INFO: Created: latency-svc-dx96g
+May 29 19:06:47.972: INFO: Got endpoints: latency-svc-7j79h [650.295584ms]
+May 29 19:06:47.991: INFO: Created: latency-svc-6pwxq
+May 29 19:06:48.022: INFO: Got endpoints: latency-svc-bvgz2 [602.290002ms]
+May 29 19:06:48.047: INFO: Created: latency-svc-2snhd
+May 29 19:06:48.073: INFO: Got endpoints: latency-svc-f7vjq [652.405941ms]
+May 29 19:06:48.112: INFO: Created: latency-svc-d8hww
+May 29 19:06:48.122: INFO: Got endpoints: latency-svc-wkhg6 [690.3522ms]
+May 29 19:06:48.146: INFO: Created: latency-svc-htkpv
+May 29 19:06:48.172: INFO: Got endpoints: latency-svc-bqmh7 [739.456576ms]
+May 29 19:06:48.190: INFO: Created: latency-svc-gpfhf
+May 29 19:06:48.224: INFO: Got endpoints: latency-svc-lj9lh [697.789903ms]
+May 29 19:06:48.245: INFO: Created: latency-svc-gggcb
+May 29 19:06:48.272: INFO: Got endpoints: latency-svc-wnrm9 [746.073587ms]
+May 29 19:06:48.293: INFO: Created: latency-svc-j6njk
+May 29 19:06:48.323: INFO: Got endpoints: latency-svc-fj8vx [750.76721ms]
+May 29 19:06:48.344: INFO: Created: latency-svc-wdbb6
+May 29 19:06:48.374: INFO: Got endpoints: latency-svc-54v7h [752.715273ms]
+May 29 19:06:48.400: INFO: Created: latency-svc-4qm4j
+May 29 19:06:48.426: INFO: Got endpoints: latency-svc-xrsmp [752.207428ms]
+May 29 19:06:48.445: INFO: Created: latency-svc-9wlsm
+May 29 19:06:48.473: INFO: Got endpoints: latency-svc-nlgjl [740.05416ms]
+May 29 19:06:48.494: INFO: Created: latency-svc-xc6mp
+May 29 19:06:48.526: INFO: Got endpoints: latency-svc-6pmp4 [752.571642ms]
+May 29 19:06:48.546: INFO: Created: latency-svc-h7rtr
+May 29 19:06:48.572: INFO: Got endpoints: latency-svc-dn4f4 [744.491673ms]
+May 29 19:06:48.597: INFO: Created: latency-svc-tr8j4
+May 29 19:06:48.624: INFO: Got endpoints: latency-svc-r9lnq [750.279734ms]
+May 29 19:06:48.646: INFO: Created: latency-svc-c8hjl
+May 29 19:06:48.674: INFO: Got endpoints: latency-svc-dx96g [751.513664ms]
+May 29 19:06:48.701: INFO: Created: latency-svc-r2djs
+May 29 19:06:48.723: INFO: Got endpoints: latency-svc-6pwxq [750.642585ms]
+May 29 19:06:48.747: INFO: Created: latency-svc-dszwf
+May 29 19:06:48.775: INFO: Got endpoints: latency-svc-2snhd [752.816263ms]
+May 29 19:06:48.798: INFO: Created: latency-svc-hvrzs
+May 29 19:06:48.822: INFO: Got endpoints: latency-svc-d8hww [749.49592ms]
+May 29 19:06:48.846: INFO: Created: latency-svc-xxm4f
+May 29 19:06:48.872: INFO: Got endpoints: latency-svc-htkpv [749.599775ms]
+May 29 19:06:48.893: INFO: Created: latency-svc-t4l68
+May 29 19:06:48.922: INFO: Got endpoints: latency-svc-gpfhf [750.475382ms]
+May 29 19:06:48.951: INFO: Created: latency-svc-ccttv
+May 29 19:06:48.974: INFO: Got endpoints: latency-svc-gggcb [749.955985ms]
+May 29 19:06:48.997: INFO: Created: latency-svc-c5vnd
+May 29 19:06:49.022: INFO: Got endpoints: latency-svc-j6njk [749.968189ms]
+May 29 19:06:49.050: INFO: Created: latency-svc-ps54n
+May 29 19:06:49.073: INFO: Got endpoints: latency-svc-wdbb6 [749.855115ms]
+May 29 19:06:49.098: INFO: Created: latency-svc-cwvv5
+May 29 19:06:49.123: INFO: Got endpoints: latency-svc-4qm4j [748.24618ms]
+May 29 19:06:49.146: INFO: Created: latency-svc-z5j6d
+May 29 19:06:49.173: INFO: Got endpoints: latency-svc-9wlsm [746.630994ms]
+May 29 19:06:49.193: INFO: Created: latency-svc-lp47p
+May 29 19:06:49.223: INFO: Got endpoints: latency-svc-xc6mp [749.745318ms]
+May 29 19:06:49.242: INFO: Created: latency-svc-v45xq
+May 29 19:06:49.272: INFO: Got endpoints: latency-svc-h7rtr [745.728727ms]
+May 29 19:06:49.293: INFO: Created: latency-svc-25nh6
+May 29 19:06:49.330: INFO: Got endpoints: latency-svc-tr8j4 [757.999116ms]
+May 29 19:06:49.349: INFO: Created: latency-svc-czxjm
+May 29 19:06:49.373: INFO: Got endpoints: latency-svc-c8hjl [748.834706ms]
+May 29 19:06:49.398: INFO: Created: latency-svc-gjd58
+May 29 19:06:49.421: INFO: Got endpoints: latency-svc-r2djs [747.510677ms]
+May 29 19:06:49.446: INFO: Created: latency-svc-x5tp8
+May 29 19:06:49.474: INFO: Got endpoints: latency-svc-dszwf [751.466894ms]
+May 29 19:06:49.502: INFO: Created: latency-svc-d9l6g
+May 29 19:06:49.522: INFO: Got endpoints: latency-svc-hvrzs [746.718528ms]
+May 29 19:06:49.548: INFO: Created: latency-svc-zj7s9
+May 29 19:06:49.573: INFO: Got endpoints: latency-svc-xxm4f [750.467185ms]
+May 29 19:06:49.601: INFO: Created: latency-svc-gjjc5
+May 29 19:06:49.623: INFO: Got endpoints: latency-svc-t4l68 [750.984919ms]
+May 29 19:06:49.655: INFO: Created: latency-svc-bnzvg
+May 29 19:06:49.672: INFO: Got endpoints: latency-svc-ccttv [750.087742ms]
+May 29 19:06:49.694: INFO: Created: latency-svc-8c29m
+May 29 19:06:49.725: INFO: Got endpoints: latency-svc-c5vnd [751.249045ms]
+May 29 19:06:49.746: INFO: Created: latency-svc-6kql8
+May 29 19:06:49.773: INFO: Got endpoints: latency-svc-ps54n [745.019052ms]
+May 29 19:06:49.793: INFO: Created: latency-svc-g2lx8
+May 29 19:06:49.825: INFO: Got endpoints: latency-svc-cwvv5 [751.631624ms]
+May 29 19:06:49.851: INFO: Created: latency-svc-hdfm9
+May 29 19:06:49.872: INFO: Got endpoints: latency-svc-z5j6d [748.741942ms]
+May 29 19:06:49.892: INFO: Created: latency-svc-x8mjl
+May 29 19:06:49.924: INFO: Got endpoints: latency-svc-lp47p [751.279623ms]
+May 29 19:06:49.945: INFO: Created: latency-svc-n4s4w
+May 29 19:06:49.973: INFO: Got endpoints: latency-svc-v45xq [749.616195ms]
+May 29 19:06:49.993: INFO: Created: latency-svc-mxr65
+May 29 19:06:50.023: INFO: Got endpoints: latency-svc-25nh6 [750.806333ms]
+May 29 19:06:50.042: INFO: Created: latency-svc-bh4ww
+May 29 19:06:50.073: INFO: Got endpoints: latency-svc-czxjm [743.330027ms]
+May 29 19:06:50.092: INFO: Created: latency-svc-mmwnh
+May 29 19:06:50.123: INFO: Got endpoints: latency-svc-gjd58 [749.450664ms]
+May 29 19:06:50.144: INFO: Created: latency-svc-xpv4d
+May 29 19:06:50.173: INFO: Got endpoints: latency-svc-x5tp8 [750.787277ms]
+May 29 19:06:50.191: INFO: Created: latency-svc-tbnk8
+May 29 19:06:50.223: INFO: Got endpoints: latency-svc-d9l6g [747.953189ms]
+May 29 19:06:50.241: INFO: Created: latency-svc-6p4zt
+May 29 19:06:50.272: INFO: Got endpoints: latency-svc-zj7s9 [746.854703ms]
+May 29 19:06:50.295: INFO: Created: latency-svc-tnfxq
+May 29 19:06:50.323: INFO: Got endpoints: latency-svc-gjjc5 [743.503113ms]
+May 29 19:06:50.342: INFO: Created: latency-svc-cdq9w
+May 29 19:06:50.373: INFO: Got endpoints: latency-svc-bnzvg [750.004374ms]
+May 29 19:06:50.393: INFO: Created: latency-svc-n28zs
+May 29 19:06:50.422: INFO: Got endpoints: latency-svc-8c29m [749.808366ms]
+May 29 19:06:50.448: INFO: Created: latency-svc-5ddhg
+May 29 19:06:50.472: INFO: Got endpoints: latency-svc-6kql8 [746.409897ms]
+May 29 19:06:50.491: INFO: Created: latency-svc-xfmsw
+May 29 19:06:50.523: INFO: Got endpoints: latency-svc-g2lx8 [749.865735ms]
+May 29 19:06:50.544: INFO: Created: latency-svc-sdwmw
+May 29 19:06:50.572: INFO: Got endpoints: latency-svc-hdfm9 [747.435655ms]
+May 29 19:06:50.590: INFO: Created: latency-svc-fsvdx
+May 29 19:06:50.635: INFO: Got endpoints: latency-svc-x8mjl [763.022469ms]
+May 29 19:06:50.665: INFO: Created: latency-svc-fln9n
+May 29 19:06:50.721: INFO: Got endpoints: latency-svc-n4s4w [796.096699ms]
+May 29 19:06:50.728: INFO: Got endpoints: latency-svc-mxr65 [755.195554ms]
+May 29 19:06:50.824: INFO: Got endpoints: latency-svc-mmwnh [750.807275ms]
+May 29 19:06:51.022: INFO: Got endpoints: latency-svc-tbnk8 [849.19888ms]
+May 29 19:06:51.023: INFO: Got endpoints: latency-svc-xpv4d [899.510136ms]
+May 29 19:06:51.023: INFO: Got endpoints: latency-svc-bh4ww [999.965904ms]
+May 29 19:06:51.024: INFO: Got endpoints: latency-svc-6p4zt [801.567022ms]
+May 29 19:06:51.028: INFO: Created: latency-svc-lw2hr
+May 29 19:06:51.029: INFO: Got endpoints: latency-svc-tnfxq [756.478275ms]
+May 29 19:06:51.127: INFO: Got endpoints: latency-svc-cdq9w [804.157923ms]
+May 29 19:06:51.130: INFO: Got endpoints: latency-svc-n28zs [756.552434ms]
+May 29 19:06:51.139: INFO: Created: latency-svc-pkb5b
+May 29 19:06:51.146: INFO: Created: latency-svc-hnj76
+May 29 19:06:51.220: INFO: Got endpoints: latency-svc-5ddhg [792.527988ms]
+May 29 19:06:51.224: INFO: Got endpoints: latency-svc-xfmsw [751.72614ms]
+May 29 19:06:51.235: INFO: Created: latency-svc-nm6bg
+May 29 19:06:51.247: INFO: Created: latency-svc-tgr6g
+May 29 19:06:51.259: INFO: Created: latency-svc-9vtzg
+May 29 19:06:51.273: INFO: Got endpoints: latency-svc-sdwmw [749.936726ms]
+May 29 19:06:51.273: INFO: Created: latency-svc-56hmg
+May 29 19:06:51.287: INFO: Created: latency-svc-sh92m
+May 29 19:06:51.319: INFO: Created: latency-svc-545nq
+May 29 19:06:51.323: INFO: Got endpoints: latency-svc-fsvdx [750.33724ms]
+May 29 19:06:51.333: INFO: Created: latency-svc-nhbc5
+May 29 19:06:51.345: INFO: Created: latency-svc-792tp
+May 29 19:06:51.357: INFO: Created: latency-svc-886zc
+May 29 19:06:51.371: INFO: Created: latency-svc-2cffd
+May 29 19:06:51.374: INFO: Got endpoints: latency-svc-fln9n [738.717083ms]
+May 29 19:06:51.382: INFO: Created: latency-svc-kdd6r
+May 29 19:06:51.422: INFO: Created: latency-svc-7fcgw
+May 29 19:06:51.430: INFO: Got endpoints: latency-svc-lw2hr [708.732081ms]
+May 29 19:06:51.449: INFO: Created: latency-svc-s9t8t
+May 29 19:06:51.472: INFO: Got endpoints: latency-svc-pkb5b [744.368939ms]
+May 29 19:06:51.494: INFO: Created: latency-svc-5kfvd
+May 29 19:06:51.523: INFO: Got endpoints: latency-svc-hnj76 [698.92857ms]
+May 29 19:06:51.543: INFO: Created: latency-svc-nqqtn
+May 29 19:06:51.573: INFO: Got endpoints: latency-svc-nm6bg [550.776083ms]
+May 29 19:06:51.624: INFO: Got endpoints: latency-svc-tgr6g [601.346075ms]
+May 29 19:06:51.674: INFO: Got endpoints: latency-svc-9vtzg [650.920936ms]
+May 29 19:06:51.722: INFO: Got endpoints: latency-svc-56hmg [697.294018ms]
+May 29 19:06:51.775: INFO: Got endpoints: latency-svc-sh92m [744.118879ms]
+May 29 19:06:51.822: INFO: Got endpoints: latency-svc-545nq [695.179513ms]
+May 29 19:06:51.873: INFO: Got endpoints: latency-svc-nhbc5 [743.056221ms]
+May 29 19:06:51.923: INFO: Got endpoints: latency-svc-792tp [703.462825ms]
+May 29 19:06:51.973: INFO: Got endpoints: latency-svc-886zc [749.516756ms]
+May 29 19:06:52.023: INFO: Got endpoints: latency-svc-2cffd [750.642983ms]
+May 29 19:06:52.072: INFO: Got endpoints: latency-svc-kdd6r [749.774039ms]
+May 29 19:06:52.123: INFO: Got endpoints: latency-svc-7fcgw [749.132013ms]
+May 29 19:06:52.172: INFO: Got endpoints: latency-svc-s9t8t [742.190614ms]
+May 29 19:06:52.223: INFO: Got endpoints: latency-svc-5kfvd [749.591859ms]
+May 29 19:06:52.273: INFO: Got endpoints: latency-svc-nqqtn [749.806722ms]
+May 29 19:06:52.276: INFO: Latencies: [25.328889ms 37.145935ms 48.794464ms 59.973141ms 66.431212ms 105.039916ms 117.220165ms 183.818902ms 198.356022ms 212.101154ms 222.015803ms 233.288155ms 235.601335ms 244.425624ms 245.823552ms 251.151398ms 251.665825ms 262.980436ms 263.091336ms 263.575937ms 264.261085ms 272.698326ms 283.000154ms 286.020743ms 286.27429ms 289.52936ms 290.52414ms 290.684281ms 290.826137ms 291.759041ms 293.120931ms 293.4767ms 293.85447ms 295.297754ms 295.592248ms 297.321559ms 298.540199ms 300.589774ms 301.157209ms 301.37562ms 301.401209ms 302.260445ms 303.143655ms 307.079794ms 308.775863ms 319.591187ms 323.439762ms 327.235037ms 333.532095ms 349.889306ms 352.336058ms 378.405526ms 393.599874ms 397.88531ms 399.031164ms 414.67242ms 429.636743ms 483.549409ms 509.962316ms 542.673293ms 549.00645ms 550.776083ms 592.93159ms 601.11869ms 601.346075ms 602.290002ms 632.529234ms 650.295584ms 650.920936ms 652.405941ms 672.416532ms 690.3522ms 695.179513ms 697.294018ms 697.789903ms 698.92857ms 699.32044ms 702.323977ms 703.462825ms 708.732081ms 708.782881ms 738.717083ms 739.456576ms 740.05416ms 742.190614ms 743.056221ms 743.330027ms 743.503113ms 744.118879ms 744.368939ms 744.491673ms 744.708381ms 745.019052ms 745.327825ms 745.728727ms 746.073587ms 746.409897ms 746.630994ms 746.718528ms 746.854703ms 747.435655ms 747.510677ms 747.59568ms 747.865926ms 747.953189ms 748.24618ms 748.741942ms 748.766241ms 748.834706ms 749.068608ms 749.132013ms 749.450664ms 749.49592ms 749.516756ms 749.591859ms 749.599775ms 749.616195ms 749.745318ms 749.774039ms 749.806722ms 749.808366ms 749.855115ms 749.865735ms 749.936726ms 749.955985ms 749.968189ms 750.004374ms 750.026399ms 750.087742ms 750.105799ms 750.184717ms 750.195486ms 750.279734ms 750.33724ms 750.467185ms 750.475382ms 750.642585ms 750.642983ms 750.76721ms 750.787277ms 750.806333ms 750.807275ms 750.934776ms 750.984919ms 750.997594ms 751.043542ms 751.145101ms 751.249045ms 751.279623ms 751.426773ms 751.466894ms 751.513664ms 751.539042ms 751.631624ms 751.679316ms 751.72614ms 751.943844ms 752.207428ms 752.289653ms 752.571642ms 752.715273ms 752.806175ms 752.816263ms 754.864405ms 755.195554ms 756.478275ms 756.552434ms 757.999116ms 758.386264ms 763.022469ms 782.288795ms 791.627379ms 792.527988ms 796.096699ms 801.023804ms 801.567022ms 802.668489ms 802.987495ms 803.596473ms 804.157923ms 808.019477ms 844.609987ms 847.27164ms 849.19888ms 850.677777ms 852.016603ms 860.293564ms 890.949518ms 891.917388ms 891.967601ms 895.373545ms 895.973705ms 896.035248ms 898.918207ms 898.969266ms 899.36533ms 899.510136ms 900.827179ms 902.467672ms 999.965904ms]
+May 29 19:06:52.279: INFO: 50 %ile: 747.435655ms
+May 29 19:06:52.280: INFO: 90 %ile: 808.019477ms
+May 29 19:06:52.280: INFO: 99 %ile: 902.467672ms
+May 29 19:06:52.280: INFO: Total sample count: 200
+[AfterEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:06:52.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-svc-latency-gsgx2" for this suite.
+May 29 19:07:20.311: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:07:20.403: INFO: namespace: e2e-tests-svc-latency-gsgx2, resource: bindings, ignored listing per whitelist
+May 29 19:07:20.522: INFO: namespace e2e-tests-svc-latency-gsgx2 deletion completed in 28.233718661s
+
+• [SLOW TEST:40.289 seconds]
+[sig-network] Service endpoints latency
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should not be very high  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:07:20.523: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-rf5mx
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+May 29 19:07:20.818: INFO: Number of nodes with available pods: 0
+May 29 19:07:20.818: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:21.833: INFO: Number of nodes with available pods: 0
+May 29 19:07:21.833: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:22.836: INFO: Number of nodes with available pods: 1
+May 29 19:07:22.836: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod
+May 29 19:07:23.833: INFO: Number of nodes with available pods: 2
+May 29 19:07:23.834: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Stop a daemon pod, check that the daemon pod is revived.
+May 29 19:07:23.871: INFO: Number of nodes with available pods: 1
+May 29 19:07:23.871: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:24.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:24.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:25.888: INFO: Number of nodes with available pods: 1
+May 29 19:07:25.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:26.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:26.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:27.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:27.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:28.894: INFO: Number of nodes with available pods: 1
+May 29 19:07:28.894: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:29.898: INFO: Number of nodes with available pods: 1
+May 29 19:07:29.898: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:30.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:30.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:31.894: INFO: Number of nodes with available pods: 1
+May 29 19:07:31.894: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:32.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:32.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:33.888: INFO: Number of nodes with available pods: 1
+May 29 19:07:33.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:34.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:34.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:35.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:35.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:36.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:36.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:37.885: INFO: Number of nodes with available pods: 1
+May 29 19:07:37.885: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:38.893: INFO: Number of nodes with available pods: 1
+May 29 19:07:38.893: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:39.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:39.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:40.888: INFO: Number of nodes with available pods: 1
+May 29 19:07:40.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:41.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:41.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:42.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:42.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:43.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:43.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:44.889: INFO: Number of nodes with available pods: 1
+May 29 19:07:44.889: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:45.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:45.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:46.888: INFO: Number of nodes with available pods: 1
+May 29 19:07:46.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:47.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:47.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:48.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:48.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:49.896: INFO: Number of nodes with available pods: 1
+May 29 19:07:49.896: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:50.892: INFO: Number of nodes with available pods: 1
+May 29 19:07:50.892: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:51.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:51.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:52.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:52.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:53.898: INFO: Number of nodes with available pods: 1
+May 29 19:07:53.898: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:54.889: INFO: Number of nodes with available pods: 1
+May 29 19:07:54.889: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:55.898: INFO: Number of nodes with available pods: 1
+May 29 19:07:55.898: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:56.888: INFO: Number of nodes with available pods: 1
+May 29 19:07:56.888: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:57.886: INFO: Number of nodes with available pods: 1
+May 29 19:07:57.886: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:58.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:58.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:07:59.887: INFO: Number of nodes with available pods: 1
+May 29 19:07:59.887: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:08:00.897: INFO: Number of nodes with available pods: 2
+May 29 19:08:00.897: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-rf5mx, will wait for the garbage collector to delete the pods
+May 29 19:08:00.969: INFO: Deleting DaemonSet.extensions daemon-set took: 10.93645ms
+May 29 19:08:01.070: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.255594ms
+May 29 19:08:43.785: INFO: Number of nodes with available pods: 0
+May 29 19:08:43.785: INFO: Number of running nodes: 0, number of available pods: 0
+May 29 19:08:43.790: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-rf5mx/daemonsets","resourceVersion":"948930564"},"items":null}
+
+May 29 19:08:43.795: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-rf5mx/pods","resourceVersion":"948930564"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:08:43.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-rf5mx" for this suite.
+May 29 19:08:49.845: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:08:49.996: INFO: namespace: e2e-tests-daemonsets-rf5mx, resource: bindings, ignored listing per whitelist
+May 29 19:08:50.151: INFO: namespace e2e-tests-daemonsets-rf5mx deletion completed in 6.332088454s
+
+• [SLOW TEST:89.629 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command in a pod 
+  should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:08:50.152: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-wj579
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:08:52.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-wj579" for this suite.
+May 29 19:09:36.516: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:09:36.609: INFO: namespace: e2e-tests-kubelet-test-wj579, resource: bindings, ignored listing per whitelist
+May 29 19:09:36.765: INFO: namespace e2e-tests-kubelet-test-wj579 deletion completed in 44.276902168s
+
+• [SLOW TEST:46.614 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a busybox command in a pod
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40
+    should print the output to logs [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:09:36.766: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-jfdbx
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-4d30640e-8245-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:09:37.107: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-jfdbx" to be "success or failure"
+May 29 19:09:37.113: INFO: Pod "pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.701969ms
+May 29 19:09:39.121: INFO: Pod "pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012968122s
+May 29 19:09:41.139: INFO: Pod "pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.031809341s
+STEP: Saw pod success
+May 29 19:09:41.139: INFO: Pod "pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:09:41.146: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d container projected-secret-volume-test: 
+STEP: delete the pod
+May 29 19:09:41.178: INFO: Waiting for pod pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:09:41.183: INFO: Pod pod-projected-secrets-4d31a57a-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:09:41.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-jfdbx" for this suite.
+May 29 19:09:47.215: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:09:47.257: INFO: namespace: e2e-tests-projected-jfdbx, resource: bindings, ignored listing per whitelist
+May 29 19:09:47.486: INFO: namespace e2e-tests-projected-jfdbx deletion completed in 6.293198869s
+
+• [SLOW TEST:10.720 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Downward API volume 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:09:47.486: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-lgkx7
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+May 29 19:09:50.329: INFO: Successfully updated pod "labelsupdate538e0506-8245-11e9-bd6e-667e8fbec69d"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:09:52.369: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-lgkx7" for this suite.
+May 29 19:10:14.409: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:10:14.550: INFO: namespace: e2e-tests-downward-api-lgkx7, resource: bindings, ignored listing per whitelist
+May 29 19:10:14.696: INFO: namespace e2e-tests-downward-api-lgkx7 deletion completed in 22.317337088s
+
+• [SLOW TEST:27.210 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:10:14.696: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-cjxv4
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap e2e-tests-configmap-cjxv4/configmap-test-63c7ab46-8245-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:10:15.008: INFO: Waiting up to 5m0s for pod "pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-cjxv4" to be "success or failure"
+May 29 19:10:15.015: INFO: Pod "pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.541316ms
+May 29 19:10:17.022: INFO: Pod "pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013986246s
+May 29 19:10:19.030: INFO: Pod "pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021420031s
+STEP: Saw pod success
+May 29 19:10:19.030: INFO: Pod "pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:10:19.037: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d container env-test: 
+STEP: delete the pod
+May 29 19:10:19.065: INFO: Waiting for pod pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:10:19.071: INFO: Pod pod-configmaps-63c8dfcd-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:10:19.071: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-cjxv4" for this suite.
+May 29 19:10:25.106: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:10:25.236: INFO: namespace: e2e-tests-configmap-cjxv4, resource: bindings, ignored listing per whitelist
+May 29 19:10:25.419: INFO: namespace e2e-tests-configmap-cjxv4 deletion completed in 6.34096191s
+
+• [SLOW TEST:10.723 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-network] DNS 
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:10:25.419: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename dns
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-dns-zdzb2
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for services  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test headless service
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2 A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2 A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-zdzb2.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 202.92.32.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.32.92.202_udp@PTR;check="$$(dig +tcp +noall +answer +search 202.92.32.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.32.92.202_tcp@PTR;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2 A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-zdzb2;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2 A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-zdzb2.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.e2e-tests-dns-zdzb2.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-zdzb2.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 202.92.32.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.32.92.202_udp@PTR;check="$$(dig +tcp +noall +answer +search 202.92.32.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.32.92.202_tcp@PTR;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+May 29 19:10:35.904: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:35.950: INFO: Unable to read wheezy_tcp@dns-test-service from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:35.960: INFO: Unable to read wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2 from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:35.973: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2 from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:35.986: INFO: Unable to read wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.019: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.030: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.039: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.446: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.456: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.467: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-zdzb2 from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.477: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2 from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.489: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.497: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.507: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.517: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc from pod e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d: the server could not find the requested resource (get pods dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d)
+May 29 19:10:36.920: INFO: Lookups using e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2 wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2 wheezy_udp@dns-test-service.e2e-tests-dns-zdzb2.svc wheezy_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-zdzb2 jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2 jessie_udp@dns-test-service.e2e-tests-dns-zdzb2.svc jessie_tcp@dns-test-service.e2e-tests-dns-zdzb2.svc jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-zdzb2.svc]
+
+May 29 19:10:43.860: INFO: DNS probes using e2e-tests-dns-zdzb2/dns-test-6a34a392-8245-11e9-bd6e-667e8fbec69d succeeded
+
+STEP: deleting the pod
+STEP: deleting the test service
+STEP: deleting the test headless service
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:10:43.926: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-dns-zdzb2" for this suite.
+May 29 19:10:49.974: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:10:50.011: INFO: namespace: e2e-tests-dns-zdzb2, resource: bindings, ignored listing per whitelist
+May 29 19:10:50.262: INFO: namespace e2e-tests-dns-zdzb2 deletion completed in 6.324767132s
+
+• [SLOW TEST:24.843 seconds]
+[sig-network] DNS
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide DNS for services  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:10:50.262: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-hw2g8
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 19:10:50.552: INFO: Waiting up to 5m0s for pod "downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-hw2g8" to be "success or failure"
+May 29 19:10:50.559: INFO: Pod "downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.60732ms
+May 29 19:10:52.567: INFO: Pod "downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014647708s
+May 29 19:10:54.574: INFO: Pod "downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021544491s
+STEP: Saw pod success
+May 29 19:10:54.574: INFO: Pod "downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:10:54.596: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 19:10:54.626: INFO: Waiting for pod downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:10:54.632: INFO: Pod downwardapi-volume-78f85abb-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:10:54.632: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-hw2g8" for this suite.
+May 29 19:11:00.669: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:11:00.939: INFO: namespace: e2e-tests-downward-api-hw2g8, resource: bindings, ignored listing per whitelist
+May 29 19:11:00.989: INFO: namespace e2e-tests-downward-api-hw2g8 deletion completed in 6.348481874s
+
+• [SLOW TEST:10.727 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:11:00.989: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svcaccounts-7fstx
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: getting the auto-created API token
+STEP: Creating a pod to test consume service account token
+May 29 19:11:01.831: INFO: Waiting up to 5m0s for pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h" in namespace "e2e-tests-svcaccounts-7fstx" to be "success or failure"
+May 29 19:11:01.837: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h": Phase="Pending", Reason="", readiness=false. Elapsed: 5.9903ms
+May 29 19:11:03.844: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013748691s
+May 29 19:11:05.852: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021545281s
+STEP: Saw pod success
+May 29 19:11:05.852: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h" satisfied condition "success or failure"
+May 29 19:11:05.859: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h container token-test: 
+STEP: delete the pod
+May 29 19:11:05.889: INFO: Waiting for pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h to disappear
+May 29 19:11:05.895: INFO: Pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8j87h no longer exists
+STEP: Creating a pod to test consume service account root CA
+May 29 19:11:05.903: INFO: Waiting up to 5m0s for pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2" in namespace "e2e-tests-svcaccounts-7fstx" to be "success or failure"
+May 29 19:11:05.911: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2": Phase="Pending", Reason="", readiness=false. Elapsed: 7.444845ms
+May 29 19:11:07.925: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022128519s
+May 29 19:11:09.932: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029105603s
+STEP: Saw pod success
+May 29 19:11:09.932: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2" satisfied condition "success or failure"
+May 29 19:11:09.939: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2 container root-ca-test: 
+STEP: delete the pod
+May 29 19:11:09.979: INFO: Waiting for pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2 to disappear
+May 29 19:11:09.985: INFO: Pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-7h2g2 no longer exists
+STEP: Creating a pod to test consume service account namespace
+May 29 19:11:09.993: INFO: Waiting up to 5m0s for pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs" in namespace "e2e-tests-svcaccounts-7fstx" to be "success or failure"
+May 29 19:11:09.998: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs": Phase="Pending", Reason="", readiness=false. Elapsed: 5.211887ms
+May 29 19:11:12.007: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014432216s
+May 29 19:11:14.014: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021254683s
+STEP: Saw pod success
+May 29 19:11:14.014: INFO: Pod "pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs" satisfied condition "success or failure"
+May 29 19:11:14.022: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs container namespace-test: 
+STEP: delete the pod
+May 29 19:11:14.049: INFO: Waiting for pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs to disappear
+May 29 19:11:14.054: INFO: Pod pod-service-account-7fb16895-8245-11e9-bd6e-667e8fbec69d-8bcfs no longer exists
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:11:14.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-svcaccounts-7fstx" for this suite.
+May 29 19:11:20.085: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:11:20.116: INFO: namespace: e2e-tests-svcaccounts-7fstx, resource: bindings, ignored listing per whitelist
+May 29 19:11:20.354: INFO: namespace e2e-tests-svcaccounts-7fstx deletion completed in 6.291669726s
+
+• [SLOW TEST:19.365 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:11:20.354: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-gdgvk
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on node default medium
+May 29 19:11:20.647: INFO: Waiting up to 5m0s for pod "pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-gdgvk" to be "success or failure"
+May 29 19:11:20.653: INFO: Pod "pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.105266ms
+May 29 19:11:22.662: INFO: Pod "pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014309841s
+May 29 19:11:24.669: INFO: Pod "pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021349412s
+STEP: Saw pod success
+May 29 19:11:24.669: INFO: Pod "pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:11:24.674: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:11:24.701: INFO: Waiting for pod pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:11:24.706: INFO: Pod pod-8ae88e16-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:11:24.706: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-gdgvk" for this suite.
+May 29 19:11:30.740: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:11:31.070: INFO: namespace: e2e-tests-emptydir-gdgvk, resource: bindings, ignored listing per whitelist
+May 29 19:11:31.138: INFO: namespace e2e-tests-emptydir-gdgvk deletion completed in 6.424197643s
+
+• [SLOW TEST:10.784 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:11:31.140: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-psnkt
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test use defaults
+May 29 19:11:31.484: INFO: Waiting up to 5m0s for pod "client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-containers-psnkt" to be "success or failure"
+May 29 19:11:31.518: INFO: Pod "client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 34.558301ms
+May 29 19:11:33.526: INFO: Pod "client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.042393617s
+May 29 19:11:35.720: INFO: Pod "client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.236155748s
+STEP: Saw pod success
+May 29 19:11:35.720: INFO: Pod "client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:11:35.727: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:11:36.034: INFO: Waiting for pod client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:11:36.039: INFO: Pod client-containers-915e529a-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:11:36.039: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-psnkt" for this suite.
+May 29 19:11:42.144: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:11:42.319: INFO: namespace: e2e-tests-containers-psnkt, resource: bindings, ignored listing per whitelist
+May 29 19:11:42.396: INFO: namespace e2e-tests-containers-psnkt deletion completed in 6.348976077s
+
+• [SLOW TEST:11.256 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:11:42.396: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-x74vx
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+May 29 19:11:42.691: INFO: Waiting up to 5m0s for pod "pod-980c29fe-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-x74vx" to be "success or failure"
+May 29 19:11:42.698: INFO: Pod "pod-980c29fe-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.7886ms
+May 29 19:11:44.705: INFO: Pod "pod-980c29fe-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013715093s
+May 29 19:11:46.712: INFO: Pod "pod-980c29fe-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020916831s
+STEP: Saw pod success
+May 29 19:11:46.712: INFO: Pod "pod-980c29fe-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:11:46.719: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-980c29fe-8245-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:11:46.750: INFO: Waiting for pod pod-980c29fe-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:11:46.757: INFO: Pod pod-980c29fe-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:11:46.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-x74vx" for this suite.
+May 29 19:11:52.794: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:11:52.933: INFO: namespace: e2e-tests-emptydir-x74vx, resource: bindings, ignored listing per whitelist
+May 29 19:11:53.178: INFO: namespace e2e-tests-emptydir-x74vx deletion completed in 6.413438497s
+
+• [SLOW TEST:10.781 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl replace 
+  should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:11:53.178: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-zx5n5
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1563
+[It] should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May 29 19:11:53.451: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=e2e-tests-kubectl-zx5n5'
+May 29 19:11:54.043: INFO: stderr: ""
+May 29 19:11:54.043: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod is running
+STEP: verifying the pod e2e-test-nginx-pod was created
+May 29 19:11:59.093: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pod e2e-test-nginx-pod --namespace=e2e-tests-kubectl-zx5n5 -o json'
+May 29 19:11:59.194: INFO: stderr: ""
+May 29 19:11:59.194: INFO: stdout: "{\n    \"apiVersion\": \"v1\",\n    \"kind\": \"Pod\",\n    \"metadata\": {\n        \"creationTimestamp\": \"2019-05-29T19:11:54Z\",\n        \"labels\": {\n            \"run\": \"e2e-test-nginx-pod\"\n        },\n        \"name\": \"e2e-test-nginx-pod\",\n        \"namespace\": \"e2e-tests-kubectl-zx5n5\",\n        \"resourceVersion\": \"948945439\",\n        \"selfLink\": \"/api/v1/namespaces/e2e-tests-kubectl-zx5n5/pods/e2e-test-nginx-pod\",\n        \"uid\": \"9ecd7bfc-8245-11e9-ac23-d23a86388eb9\"\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"image\": \"docker.io/library/nginx:1.14-alpine\",\n                \"imagePullPolicy\": \"IfNotPresent\",\n                \"name\": \"e2e-test-nginx-pod\",\n                \"resources\": {},\n                \"terminationMessagePath\": \"/dev/termination-log\",\n                \"terminationMessagePolicy\": \"File\",\n                \"volumeMounts\": [\n                    {\n                        \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n                        \"name\": \"default-token-4nrnj\",\n                        \"readOnly\": true\n                    }\n                ]\n            }\n        ],\n        \"dnsPolicy\": \"ClusterFirst\",\n        \"enableServiceLinks\": true,\n        \"nodeName\": \"scw-sono13-default-71171af685174eada6c25c1541e\",\n        \"priority\": 0,\n        \"restartPolicy\": \"Always\",\n        \"schedulerName\": \"default-scheduler\",\n        \"securityContext\": {},\n        \"serviceAccount\": \"default\",\n        \"serviceAccountName\": \"default\",\n        \"terminationGracePeriodSeconds\": 30,\n        \"tolerations\": [\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/not-ready\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            },\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/unreachable\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            }\n        ],\n        \"volumes\": [\n            {\n                \"name\": \"default-token-4nrnj\",\n                \"secret\": {\n                    \"defaultMode\": 420,\n                    \"secretName\": \"default-token-4nrnj\"\n                }\n            }\n        ]\n    },\n    \"status\": {\n        \"conditions\": [\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-05-29T19:11:54Z\",\n                \"status\": \"True\",\n                \"type\": \"Initialized\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-05-29T19:11:56Z\",\n                \"status\": \"True\",\n                \"type\": \"Ready\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-05-29T19:11:56Z\",\n                \"status\": \"True\",\n                \"type\": \"ContainersReady\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-05-29T19:11:54Z\",\n                \"status\": \"True\",\n                \"type\": \"PodScheduled\"\n            }\n        ],\n        \"containerStatuses\": [\n            {\n                \"containerID\": \"docker://57a4adf51e2bd0ad19426ee3d6adea4de0f36c899b4e5b6a966f5d1f75cce0a1\",\n                \"image\": \"nginx:1.14-alpine\",\n                \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n                \"lastState\": {},\n                \"name\": \"e2e-test-nginx-pod\",\n                \"ready\": true,\n                \"restartCount\": 0,\n                \"state\": {\n                    \"running\": {\n                        \"startedAt\": \"2019-05-29T19:11:55Z\"\n                    }\n                }\n            }\n        ],\n        \"hostIP\": \"10.12.157.201\",\n        \"phase\": \"Running\",\n        \"podIP\": \"100.64.1.137\",\n        \"qosClass\": \"BestEffort\",\n        \"startTime\": \"2019-05-29T19:11:54Z\"\n    }\n}\n"
+STEP: replace the image in the pod
+May 29 19:11:59.196: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 replace -f - --namespace=e2e-tests-kubectl-zx5n5'
+May 29 19:11:59.540: INFO: stderr: ""
+May 29 19:11:59.540: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n"
+STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29
+[AfterEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1568
+May 29 19:11:59.550: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-zx5n5'
+May 29 19:12:03.714: INFO: stderr: ""
+May 29 19:12:03.714: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:12:03.714: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-zx5n5" for this suite.
+May 29 19:12:09.755: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:12:09.779: INFO: namespace: e2e-tests-kubectl-zx5n5, resource: bindings, ignored listing per whitelist
+May 29 19:12:09.986: INFO: namespace e2e-tests-kubectl-zx5n5 deletion completed in 6.255200664s
+
+• [SLOW TEST:16.808 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl replace
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should update a single-container pod's image  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:12:09.986: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-cfxhs
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:12:10.293: INFO: Creating daemon "daemon-set" with a node selector
+STEP: Initially, daemon pods should not be running on any nodes.
+May 29 19:12:10.306: INFO: Number of nodes with available pods: 0
+May 29 19:12:10.306: INFO: Number of running nodes: 0, number of available pods: 0
+STEP: Change node label to blue, check that daemon pod is launched.
+May 29 19:12:10.338: INFO: Number of nodes with available pods: 0
+May 29 19:12:10.338: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:11.345: INFO: Number of nodes with available pods: 0
+May 29 19:12:11.346: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:12.346: INFO: Number of nodes with available pods: 1
+May 29 19:12:12.346: INFO: Number of running nodes: 1, number of available pods: 1
+STEP: Update the node label to green, and wait for daemons to be unscheduled
+May 29 19:12:12.375: INFO: Number of nodes with available pods: 0
+May 29 19:12:12.375: INFO: Number of running nodes: 0, number of available pods: 0
+STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate
+May 29 19:12:12.397: INFO: Number of nodes with available pods: 0
+May 29 19:12:12.397: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:13.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:13.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:14.412: INFO: Number of nodes with available pods: 0
+May 29 19:12:14.412: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:15.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:15.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:16.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:16.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:17.404: INFO: Number of nodes with available pods: 0
+May 29 19:12:17.404: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:18.408: INFO: Number of nodes with available pods: 0
+May 29 19:12:18.408: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:19.409: INFO: Number of nodes with available pods: 0
+May 29 19:12:19.409: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:20.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:20.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:21.406: INFO: Number of nodes with available pods: 0
+May 29 19:12:21.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:22.406: INFO: Number of nodes with available pods: 0
+May 29 19:12:22.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:23.406: INFO: Number of nodes with available pods: 0
+May 29 19:12:23.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:24.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:24.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:25.412: INFO: Number of nodes with available pods: 0
+May 29 19:12:25.412: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:26.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:26.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:27.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:27.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:28.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:28.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:29.404: INFO: Number of nodes with available pods: 0
+May 29 19:12:29.404: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:30.404: INFO: Number of nodes with available pods: 0
+May 29 19:12:30.404: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:31.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:31.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:32.404: INFO: Number of nodes with available pods: 0
+May 29 19:12:32.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:33.419: INFO: Number of nodes with available pods: 0
+May 29 19:12:33.419: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:34.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:34.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:35.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:35.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:36.413: INFO: Number of nodes with available pods: 0
+May 29 19:12:36.413: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:37.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:37.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:38.410: INFO: Number of nodes with available pods: 0
+May 29 19:12:38.410: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:39.406: INFO: Number of nodes with available pods: 0
+May 29 19:12:39.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:40.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:40.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:41.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:41.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:42.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:42.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:43.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:43.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:44.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:44.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:45.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:45.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:46.404: INFO: Number of nodes with available pods: 0
+May 29 19:12:46.404: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:47.412: INFO: Number of nodes with available pods: 0
+May 29 19:12:47.412: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:48.406: INFO: Number of nodes with available pods: 0
+May 29 19:12:48.406: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:49.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:49.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:50.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:50.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:51.405: INFO: Number of nodes with available pods: 0
+May 29 19:12:51.405: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:12:52.406: INFO: Number of nodes with available pods: 1
+May 29 19:12:52.406: INFO: Number of running nodes: 1, number of available pods: 1
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-cfxhs, will wait for the garbage collector to delete the pods
+May 29 19:12:52.486: INFO: Deleting DaemonSet.extensions daemon-set took: 11.68822ms
+May 29 19:12:52.586: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.259169ms
+May 29 19:13:30.100: INFO: Number of nodes with available pods: 0
+May 29 19:13:30.100: INFO: Number of running nodes: 0, number of available pods: 0
+May 29 19:13:30.106: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-cfxhs/daemonsets","resourceVersion":"948952620"},"items":null}
+
+May 29 19:13:30.112: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-cfxhs/pods","resourceVersion":"948952620"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:13:30.140: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-cfxhs" for this suite.
+May 29 19:13:36.167: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:13:36.246: INFO: namespace: e2e-tests-daemonsets-cfxhs, resource: bindings, ignored listing per whitelist
+May 29 19:13:36.416: INFO: namespace e2e-tests-daemonsets-cfxhs deletion completed in 6.268839699s
+
+• [SLOW TEST:86.430 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should run and stop complex daemon [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:13:36.416: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-vtvjd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May 29 19:13:36.794: INFO: Waiting up to 5m0s for pod "downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-vtvjd" to be "success or failure"
+May 29 19:13:36.800: INFO: Pod "downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.110646ms
+May 29 19:13:38.807: INFO: Pod "downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01363912s
+May 29 19:13:40.822: INFO: Pod "downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028601566s
+STEP: Saw pod success
+May 29 19:13:40.822: INFO: Pod "downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:13:40.830: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d container dapi-container: 
+STEP: delete the pod
+May 29 19:13:40.857: INFO: Waiting for pod downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:13:40.862: INFO: Pod downward-api-dc0eecb3-8245-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:13:40.862: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-vtvjd" for this suite.
+May 29 19:13:46.892: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:13:46.948: INFO: namespace: e2e-tests-downward-api-vtvjd, resource: bindings, ignored listing per whitelist
+May 29 19:13:47.122: INFO: namespace e2e-tests-downward-api-vtvjd deletion completed in 6.25277078s
+
+• [SLOW TEST:10.707 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-apps] Deployment 
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:13:47.123: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-kjqnz
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:13:47.410: INFO: Pod name cleanup-pod: Found 0 pods out of 1
+May 29 19:13:52.427: INFO: Pod name cleanup-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+May 29 19:13:52.427: INFO: Creating deployment test-cleanup-deployment
+STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May 29 19:13:56.478: INFO: Deployment "test-cleanup-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:e2e-tests-deployment-kjqnz,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-kjqnz/deployments/test-cleanup-deployment,UID:e5642027-8245-11e9-9b18-c2b4512ea1b9,ResourceVersion:948954578,Generation:1,CreationTimestamp:2019-05-29 19:13:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 1,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-29 19:13:52 +0000 UTC 2019-05-29 19:13:52 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-29 19:13:54 +0000 UTC 2019-05-29 19:13:52 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-cleanup-deployment-7dbbfcf846" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+May 29 19:13:56.485: INFO: New ReplicaSet "test-cleanup-deployment-7dbbfcf846" of Deployment "test-cleanup-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-7dbbfcf846,GenerateName:,Namespace:e2e-tests-deployment-kjqnz,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-kjqnz/replicasets/test-cleanup-deployment-7dbbfcf846,UID:e5667932-8245-11e9-9b18-c2b4512ea1b9,ResourceVersion:948954567,Generation:1,CreationTimestamp:2019-05-29 19:13:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment e5642027-8245-11e9-9b18-c2b4512ea1b9 0xc0016ee817 0xc0016ee818}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+May 29 19:13:56.494: INFO: Pod "test-cleanup-deployment-7dbbfcf846-l72z2" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-7dbbfcf846-l72z2,GenerateName:test-cleanup-deployment-7dbbfcf846-,Namespace:e2e-tests-deployment-kjqnz,SelfLink:/api/v1/namespaces/e2e-tests-deployment-kjqnz/pods/test-cleanup-deployment-7dbbfcf846-l72z2,UID:e56743c4-8245-11e9-9b18-c2b4512ea1b9,ResourceVersion:948954566,Generation:0,CreationTimestamp:2019-05-29 19:13:52 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-deployment-7dbbfcf846 e5667932-8245-11e9-9b18-c2b4512ea1b9 0xc001ac6297 0xc001ac6298}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-g6fzh {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-g6fzh,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-g6fzh true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001ac6370} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001ac6390}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:13:52 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:13:54 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:13:54 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:13:52 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:100.64.0.65,StartTime:2019-05-29 19:13:52 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-29 19:13:53 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://47b521e88a05a28c27bdc89df6b7a8e658bae929acc680e8a2eb345aaf364620}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:13:56.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-kjqnz" for this suite.
+May 29 19:14:02.534: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:14:02.762: INFO: namespace: e2e-tests-deployment-kjqnz, resource: bindings, ignored listing per whitelist
+May 29 19:14:02.817: INFO: namespace e2e-tests-deployment-kjqnz deletion completed in 6.315401839s
+
+• [SLOW TEST:15.694 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Guestbook application 
+  should create and stop a working application  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:14:02.817: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-rjwff
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should create and stop a working application  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating all guestbook components
+May 29 19:14:03.094: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+  selector:
+    app: redis
+    role: slave
+    tier: backend
+
+May 29 19:14:03.094: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:03.380: INFO: stderr: ""
+May 29 19:14:03.380: INFO: stdout: "service/redis-slave created\n"
+May 29 19:14:03.380: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  ports:
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    role: master
+    tier: backend
+
+May 29 19:14:03.380: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:03.630: INFO: stderr: ""
+May 29 19:14:03.630: INFO: stdout: "service/redis-master created\n"
+May 29 19:14:03.630: INFO: apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
+  ports:
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+
+May 29 19:14:03.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:03.900: INFO: stderr: ""
+May 29 19:14:03.900: INFO: stdout: "service/frontend created\n"
+May 29 19:14:03.900: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: frontend
+spec:
+  replicas: 3
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: gcr.io/google-samples/gb-frontend:v6
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below:
+          # value: env
+        ports:
+        - containerPort: 80
+
+May 29 19:14:03.900: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:04.151: INFO: stderr: ""
+May 29 19:14:04.151: INFO: stdout: "deployment.extensions/frontend created\n"
+May 29 19:14:04.151: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: redis-master
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: master
+        tier: backend
+    spec:
+      containers:
+      - name: master
+        image: gcr.io/kubernetes-e2e-test-images/redis:1.0
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 6379
+
+May 29 19:14:04.151: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:04.395: INFO: stderr: ""
+May 29 19:14:04.395: INFO: stdout: "deployment.extensions/redis-master created\n"
+May 29 19:14:04.395: INFO: apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: redis-slave
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: gcr.io/google-samples/gb-redisslave:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below:
+          # value: env
+        ports:
+        - containerPort: 6379
+
+May 29 19:14:04.395: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:04.687: INFO: stderr: ""
+May 29 19:14:04.687: INFO: stdout: "deployment.extensions/redis-slave created\n"
+STEP: validating guestbook app
+May 29 19:14:04.687: INFO: Waiting for all frontend pods to be Running.
+May 29 19:14:24.744: INFO: Waiting for frontend to serve content.
+May 29 19:14:24.852: INFO: Trying to add a new entry to the guestbook.
+May 29 19:14:24.979: INFO: Verifying that added entry can be retrieved.
+STEP: using delete to clean up resources
+May 29 19:14:25.027: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:25.203: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:25.203: INFO: stdout: "service \"redis-slave\" force deleted\n"
+STEP: using delete to clean up resources
+May 29 19:14:25.204: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:25.386: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:25.386: INFO: stdout: "service \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+May 29 19:14:25.386: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:25.543: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:25.543: INFO: stdout: "service \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+May 29 19:14:25.544: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:25.671: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:25.671: INFO: stdout: "deployment.extensions \"frontend\" force deleted\n"
+STEP: using delete to clean up resources
+May 29 19:14:25.672: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:25.850: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:25.850: INFO: stdout: "deployment.extensions \"redis-master\" force deleted\n"
+STEP: using delete to clean up resources
+May 29 19:14:25.850: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-rjwff'
+May 29 19:14:26.275: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:14:26.275: INFO: stdout: "deployment.extensions \"redis-slave\" force deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:14:26.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-rjwff" for this suite.
+May 29 19:15:06.306: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:15:06.502: INFO: namespace: e2e-tests-kubectl-rjwff, resource: bindings, ignored listing per whitelist
+May 29 19:15:06.613: INFO: namespace e2e-tests-kubectl-rjwff deletion completed in 40.330341255s
+
+• [SLOW TEST:63.796 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Guestbook application
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create and stop a working application  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:15:06.613: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-fhstt
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs
+STEP: Gathering metrics
+W0529 19:15:36.982058      19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May 29 19:15:36.982: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:15:36.982: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-fhstt" for this suite.
+May 29 19:15:43.008: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:15:43.242: INFO: namespace: e2e-tests-gc-fhstt, resource: bindings, ignored listing per whitelist
+May 29 19:15:43.285: INFO: namespace e2e-tests-gc-fhstt deletion completed in 6.298337324s
+
+• [SLOW TEST:36.672 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:15:43.286: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-lq95d
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-27a08415-8246-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:15:43.588: INFO: Waiting up to 5m0s for pod "pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-lq95d" to be "success or failure"
+May 29 19:15:43.594: INFO: Pod "pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.443261ms
+May 29 19:15:45.601: INFO: Pod "pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01362063s
+May 29 19:15:47.617: INFO: Pod "pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028890821s
+STEP: Saw pod success
+May 29 19:15:47.617: INFO: Pod "pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:15:47.625: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d container configmap-volume-test: 
+STEP: delete the pod
+May 29 19:15:47.658: INFO: Waiting for pod pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:15:47.663: INFO: Pod pod-configmaps-27a1bab0-8246-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:15:47.663: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-lq95d" for this suite.
+May 29 19:15:53.708: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:15:53.867: INFO: namespace: e2e-tests-configmap-lq95d, resource: bindings, ignored listing per whitelist
+May 29 19:15:53.971: INFO: namespace e2e-tests-configmap-lq95d deletion completed in 6.300828569s
+
+• [SLOW TEST:10.685 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:15:53.972: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-r6mjl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+May 29 19:15:56.856: INFO: Successfully updated pod "labelsupdate2e0505c7-8246-11e9-bd6e-667e8fbec69d"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:16:00.911: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-r6mjl" for this suite.
+May 29 19:16:22.941: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:16:22.995: INFO: namespace: e2e-tests-projected-r6mjl, resource: bindings, ignored listing per whitelist
+May 29 19:16:23.175: INFO: namespace e2e-tests-projected-r6mjl deletion completed in 22.256066402s
+
+• [SLOW TEST:29.203 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:16:23.175: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename namespaces
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-namespaces-lhvql
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-cdksf
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Creating an uninitialized pod in the namespace
+May 29 19:16:27.626: INFO: error from create uninitialized namespace: 
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-nsdeletetest-dnbl6
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:16:50.815: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-namespaces-lhvql" for this suite.
+May 29 19:16:56.851: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:16:56.934: INFO: namespace: e2e-tests-namespaces-lhvql, resource: bindings, ignored listing per whitelist
+May 29 19:16:57.121: INFO: namespace e2e-tests-namespaces-lhvql deletion completed in 6.294334225s
+STEP: Destroying namespace "e2e-tests-nsdeletetest-cdksf" for this suite.
+May 29 19:16:57.127: INFO: Namespace e2e-tests-nsdeletetest-cdksf was already deleted
+STEP: Destroying namespace "e2e-tests-nsdeletetest-dnbl6" for this suite.
+May 29 19:17:03.158: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:17:03.438: INFO: namespace: e2e-tests-nsdeletetest-dnbl6, resource: bindings, ignored listing per whitelist
+May 29 19:17:03.456: INFO: namespace e2e-tests-nsdeletetest-dnbl6 deletion completed in 6.328025168s
+
+• [SLOW TEST:40.280 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-apps] Deployment 
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:17:03.456: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-g9d4t
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:17:03.818: INFO: Pod name rollover-pod: Found 0 pods out of 1
+May 29 19:17:08.826: INFO: Pod name rollover-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+May 29 19:17:08.826: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready
+May 29 19:17:10.832: INFO: Creating deployment "test-rollover-deployment"
+May 29 19:17:10.845: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations
+May 29 19:17:12.859: INFO: Check revision of new replica set for deployment "test-rollover-deployment"
+May 29 19:17:12.873: INFO: Ensure that both replica sets have 1 created replica
+May 29 19:17:12.886: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update
+May 29 19:17:12.899: INFO: Updating deployment test-rollover-deployment
+May 29 19:17:12.899: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller
+May 29 19:17:14.920: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2
+May 29 19:17:14.931: INFO: Make sure deployment "test-rollover-deployment" is complete
+May 29 19:17:14.943: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:14.944: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754232, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:16.958: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:16.958: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754235, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:18.957: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:18.957: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754235, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:20.958: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:20.958: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754235, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:22.957: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:22.957: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754235, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:24.968: INFO: all replica sets need to contain the pod-template-hash label
+May 29 19:17:24.968: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754235, loc:(*time.Location)(0x7b33b80)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754230, loc:(*time.Location)(0x7b33b80)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May 29 19:17:26.957: INFO: 
+May 29 19:17:26.957: INFO: Ensure that both old replica sets have no replicas
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May 29 19:17:26.972: INFO: Deployment "test-rollover-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:e2e-tests-deployment-g9d4t,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-g9d4t/deployments/test-rollover-deployment,UID:5ba4e2d0-8246-11e9-9b18-c2b4512ea1b9,ResourceVersion:948970934,Generation:2,CreationTimestamp:2019-05-29 19:17:10 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-29 19:17:10 +0000 UTC 2019-05-29 19:17:10 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-29 19:17:25 +0000 UTC 2019-05-29 19:17:10 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-6b7f9d6597" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+May 29 19:17:26.979: INFO: New ReplicaSet "test-rollover-deployment-6b7f9d6597" of Deployment "test-rollover-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6b7f9d6597,GenerateName:,Namespace:e2e-tests-deployment-g9d4t,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-g9d4t/replicasets/test-rollover-deployment-6b7f9d6597,UID:5cdff999-8246-11e9-9b18-c2b4512ea1b9,ResourceVersion:948970924,Generation:2,CreationTimestamp:2019-05-29 19:17:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 5ba4e2d0-8246-11e9-9b18-c2b4512ea1b9 0xc001d870b7 0xc001d870b8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+May 29 19:17:26.979: INFO: All old ReplicaSets of Deployment "test-rollover-deployment":
+May 29 19:17:26.980: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:e2e-tests-deployment-g9d4t,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-g9d4t/replicasets/test-rollover-controller,UID:5774617b-8246-11e9-9b18-c2b4512ea1b9,ResourceVersion:948970933,Generation:2,CreationTimestamp:2019-05-29 19:17:03 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 5ba4e2d0-8246-11e9-9b18-c2b4512ea1b9 0xc001d86eb7 0xc001d86eb8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May 29 19:17:26.980: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6586df867b,GenerateName:,Namespace:e2e-tests-deployment-g9d4t,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-g9d4t/replicasets/test-rollover-deployment-6586df867b,UID:5ba70e7b-8246-11e9-9b18-c2b4512ea1b9,ResourceVersion:948969967,Generation:2,CreationTimestamp:2019-05-29 19:17:10 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 5ba4e2d0-8246-11e9-9b18-c2b4512ea1b9 0xc001d86f77 0xc001d86f78}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May 29 19:17:26.987: INFO: Pod "test-rollover-deployment-6b7f9d6597-dsdv6" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6b7f9d6597-dsdv6,GenerateName:test-rollover-deployment-6b7f9d6597-,Namespace:e2e-tests-deployment-g9d4t,SelfLink:/api/v1/namespaces/e2e-tests-deployment-g9d4t/pods/test-rollover-deployment-6b7f9d6597-dsdv6,UID:5ce468e2-8246-11e9-9b18-c2b4512ea1b9,ResourceVersion:948970172,Generation:0,CreationTimestamp:2019-05-29 19:17:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-6b7f9d6597 5cdff999-8246-11e9-9b18-c2b4512ea1b9 0xc001d87c27 0xc001d87c28}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-9scq5 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-9scq5,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-9scq5 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d87c90} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d87cb0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:17:12 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:17:15 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:17:15 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:17:12 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.148,StartTime:2019-05-29 19:17:12 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-29 19:17:14 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://66e61446832fed6e1b033bcca65f19a15638db50b6b09c050a7e430b3bc4116e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:17:26.987: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-g9d4t" for this suite.
+May 29 19:17:33.015: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:17:33.175: INFO: namespace: e2e-tests-deployment-g9d4t, resource: bindings, ignored listing per whitelist
+May 29 19:17:33.224: INFO: namespace e2e-tests-deployment-g9d4t deletion completed in 6.229270485s
+
+• [SLOW TEST:29.769 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] KubeletManagedEtcHosts 
+  should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:17:33.227: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-e2e-kubelet-etc-hosts-66dmz
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Setting up the test
+STEP: Creating hostNetwork=false pod
+STEP: Creating hostNetwork=true pod
+STEP: Running the test
+STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false
+May 29 19:17:41.601: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:41.601: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:41.774: INFO: Exec stderr: ""
+May 29 19:17:41.774: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:41.774: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:41.920: INFO: Exec stderr: ""
+May 29 19:17:41.920: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:41.920: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.050: INFO: Exec stderr: ""
+May 29 19:17:42.050: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.050: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.175: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount
+May 29 19:17:42.175: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.175: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.321: INFO: Exec stderr: ""
+May 29 19:17:42.321: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.321: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.462: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true
+May 29 19:17:42.462: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.462: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.615: INFO: Exec stderr: ""
+May 29 19:17:42.615: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.615: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.738: INFO: Exec stderr: ""
+May 29 19:17:42.738: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.738: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:42.883: INFO: Exec stderr: ""
+May 29 19:17:42.883: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-66dmz PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:17:42.883: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:17:43.036: INFO: Exec stderr: ""
+[AfterEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:17:43.036: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-e2e-kubelet-etc-hosts-66dmz" for this suite.
+May 29 19:18:21.091: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:18:21.269: INFO: namespace: e2e-tests-e2e-kubelet-etc-hosts-66dmz, resource: bindings, ignored listing per whitelist
+May 29 19:18:21.355: INFO: namespace e2e-tests-e2e-kubelet-etc-hosts-66dmz deletion completed in 38.288625636s
+
+• [SLOW TEST:48.128 seconds]
+[k8s.io] KubeletManagedEtcHosts
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should test kubelet managed /etc/hosts file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:18:21.355: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-9zvdj
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test override all
+May 29 19:18:21.668: INFO: Waiting up to 5m0s for pod "client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-containers-9zvdj" to be "success or failure"
+May 29 19:18:21.678: INFO: Pod "client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 10.213688ms
+May 29 19:18:23.686: INFO: Pod "client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018295489s
+May 29 19:18:25.693: INFO: Pod "client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025736764s
+STEP: Saw pod success
+May 29 19:18:25.693: INFO: Pod "client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:18:25.700: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:18:25.728: INFO: Waiting for pod client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:18:25.734: INFO: Pod client-containers-85db0995-8246-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:18:25.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-9zvdj" for this suite.
+May 29 19:18:31.780: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:18:31.968: INFO: namespace: e2e-tests-containers-9zvdj, resource: bindings, ignored listing per whitelist
+May 29 19:18:32.012: INFO: namespace e2e-tests-containers-9zvdj deletion completed in 6.262829059s
+
+• [SLOW TEST:10.657 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:18:32.012: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-wzsp4
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 19:18:32.291: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-wzsp4" to be "success or failure"
+May 29 19:18:32.297: INFO: Pod "downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.997283ms
+May 29 19:18:34.304: INFO: Pod "downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013057798s
+May 29 19:18:36.312: INFO: Pod "downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021008536s
+STEP: Saw pod success
+May 29 19:18:36.312: INFO: Pod "downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:18:36.318: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 19:18:36.348: INFO: Waiting for pod downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:18:36.354: INFO: Pod downwardapi-volume-8c301d3a-8246-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:18:36.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-wzsp4" for this suite.
+May 29 19:18:42.392: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:18:42.516: INFO: namespace: e2e-tests-projected-wzsp4, resource: bindings, ignored listing per whitelist
+May 29 19:18:42.668: INFO: namespace e2e-tests-projected-wzsp4 deletion completed in 6.306309917s
+
+• [SLOW TEST:10.656 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:18:42.670: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-bwnpr
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295
+[It] should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the initial replication controller
+May 29 19:18:42.928: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:43.228: INFO: stderr: ""
+May 29 19:18:43.229: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:18:43.229: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:43.364: INFO: stderr: ""
+May 29 19:18:43.364: INFO: stdout: "update-demo-nautilus-lp2t4 update-demo-nautilus-mqplj "
+May 29 19:18:43.364: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lp2t4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:43.487: INFO: stderr: ""
+May 29 19:18:43.487: INFO: stdout: ""
+May 29 19:18:43.487: INFO: update-demo-nautilus-lp2t4 is created but not running
+May 29 19:18:48.487: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:48.630: INFO: stderr: ""
+May 29 19:18:48.630: INFO: stdout: "update-demo-nautilus-lp2t4 update-demo-nautilus-mqplj "
+May 29 19:18:48.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lp2t4 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:48.752: INFO: stderr: ""
+May 29 19:18:48.752: INFO: stdout: "true"
+May 29 19:18:48.752: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lp2t4 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:48.874: INFO: stderr: ""
+May 29 19:18:48.874: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:18:48.874: INFO: validating pod update-demo-nautilus-lp2t4
+May 29 19:18:48.967: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:18:48.967: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:18:48.967: INFO: update-demo-nautilus-lp2t4 is verified up and running
+May 29 19:18:48.968: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-mqplj -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:49.073: INFO: stderr: ""
+May 29 19:18:49.073: INFO: stdout: "true"
+May 29 19:18:49.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-mqplj -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:18:49.184: INFO: stderr: ""
+May 29 19:18:49.184: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:18:49.184: INFO: validating pod update-demo-nautilus-mqplj
+May 29 19:18:49.283: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:18:49.284: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:18:49.284: INFO: update-demo-nautilus-mqplj is verified up and running
+STEP: rolling-update to new replication controller
+May 29 19:18:49.285: INFO: scanned /root for discovery docs: 
+May 29 19:18:49.286: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.012: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+May 29 19:19:17.012: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:19:17.012: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.148: INFO: stderr: ""
+May 29 19:19:17.148: INFO: stdout: "update-demo-kitten-d499r update-demo-kitten-drcqv "
+May 29 19:19:17.148: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-kitten-d499r -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.266: INFO: stderr: ""
+May 29 19:19:17.266: INFO: stdout: "true"
+May 29 19:19:17.266: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-kitten-d499r -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.396: INFO: stderr: ""
+May 29 19:19:17.396: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+May 29 19:19:17.396: INFO: validating pod update-demo-kitten-d499r
+May 29 19:19:17.491: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+May 29 19:19:17.491: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg .
+May 29 19:19:17.491: INFO: update-demo-kitten-d499r is verified up and running
+May 29 19:19:17.491: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-kitten-drcqv -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.614: INFO: stderr: ""
+May 29 19:19:17.614: INFO: stdout: "true"
+May 29 19:19:17.614: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-kitten-drcqv -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-bwnpr'
+May 29 19:19:17.734: INFO: stderr: ""
+May 29 19:19:17.734: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+May 29 19:19:17.734: INFO: validating pod update-demo-kitten-drcqv
+May 29 19:19:17.836: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+May 29 19:19:17.836: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg .
+May 29 19:19:17.836: INFO: update-demo-kitten-drcqv is verified up and running
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:19:17.836: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-bwnpr" for this suite.
+May 29 19:19:39.876: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:19:40.125: INFO: namespace: e2e-tests-kubectl-bwnpr, resource: bindings, ignored listing per whitelist
+May 29 19:19:40.197: INFO: namespace e2e-tests-kubectl-bwnpr deletion completed in 22.353467095s
+
+• [SLOW TEST:57.528 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should do a rolling update of a replication controller  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:19:40.198: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-hkqzf
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on node default medium
+May 29 19:19:40.482: INFO: Waiting up to 5m0s for pod "pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-hkqzf" to be "success or failure"
+May 29 19:19:40.488: INFO: Pod "pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.095016ms
+May 29 19:19:42.495: INFO: Pod "pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013333559s
+May 29 19:19:44.505: INFO: Pod "pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022648555s
+STEP: Saw pod success
+May 29 19:19:44.505: INFO: Pod "pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:19:44.517: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:19:44.543: INFO: Waiting for pod pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:19:44.549: INFO: Pod pod-b4d53a31-8246-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:19:44.549: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-hkqzf" for this suite.
+May 29 19:19:50.587: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:19:50.679: INFO: namespace: e2e-tests-emptydir-hkqzf, resource: bindings, ignored listing per whitelist
+May 29 19:19:50.816: INFO: namespace e2e-tests-emptydir-hkqzf deletion completed in 6.260217068s
+
+• [SLOW TEST:10.619 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0666,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:19:50.817: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-pd8gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace e2e-tests-statefulset-pd8gc
+[It] Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating stateful set ss in namespace e2e-tests-statefulset-pd8gc
+STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-pd8gc
+May 29 19:19:51.107: INFO: Found 0 stateful pods, waiting for 1
+May 29 19:20:01.129: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod
+May 29 19:20:01.140: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:20:01.409: INFO: stderr: ""
+May 29 19:20:01.409: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:20:01.409: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May 29 19:20:01.419: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+May 29 19:20:11.436: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+May 29 19:20:11.437: INFO: Waiting for statefulset status.replicas updated to 0
+May 29 19:20:11.470: INFO: POD   NODE                                            PHASE    GRACE  CONDITIONS
+May 29 19:20:11.470: INFO: ss-0  scw-sono13-default-71171af685174eada6c25c1541e  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:02 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:02 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  }]
+May 29 19:20:11.470: INFO: 
+May 29 19:20:11.470: INFO: StatefulSet ss has not reached scale 3, at 1
+May 29 19:20:12.479: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.990431034s
+May 29 19:20:13.489: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.981171999s
+May 29 19:20:14.498: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.971339737s
+May 29 19:20:15.505: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.962730996s
+May 29 19:20:16.514: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.955574865s
+May 29 19:20:17.523: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.946775164s
+May 29 19:20:18.532: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.937514117s
+May 29 19:20:19.540: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.928852768s
+May 29 19:20:20.549: INFO: Verifying statefulset ss doesn't scale past 3 for another 920.80432ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-pd8gc
+May 29 19:20:21.568: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May 29 19:20:21.844: INFO: stderr: ""
+May 29 19:20:21.844: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May 29 19:20:21.844: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May 29 19:20:21.844: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May 29 19:20:22.205: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n"
+May 29 19:20:22.205: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May 29 19:20:22.205: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May 29 19:20:22.205: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May 29 19:20:22.465: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n"
+May 29 19:20:22.465: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May 29 19:20:22.465: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May 29 19:20:22.474: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+May 29 19:20:22.474: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+May 29 19:20:22.474: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Scale down will not halt with unhealthy stateful pod
+May 29 19:20:22.482: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:20:22.753: INFO: stderr: ""
+May 29 19:20:22.753: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:20:22.753: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May 29 19:20:22.753: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:20:23.048: INFO: stderr: ""
+May 29 19:20:23.048: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:20:23.048: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May 29 19:20:23.048: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-pd8gc ss-2 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:20:23.324: INFO: stderr: ""
+May 29 19:20:23.324: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:20:23.324: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May 29 19:20:23.324: INFO: Waiting for statefulset status.replicas updated to 0
+May 29 19:20:23.331: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 1
+May 29 19:20:33.353: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+May 29 19:20:33.353: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+May 29 19:20:33.353: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+May 29 19:20:33.376: INFO: POD   NODE                                            PHASE    GRACE  CONDITIONS
+May 29 19:20:33.376: INFO: ss-0  scw-sono13-default-71171af685174eada6c25c1541e  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  }]
+May 29 19:20:33.376: INFO: ss-1  scw-sono13-default-2865dd8133304358ae8da697bb2  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:33.376: INFO: ss-2  scw-sono13-default-71171af685174eada6c25c1541e  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:33.376: INFO: 
+May 29 19:20:33.376: INFO: StatefulSet ss has not reached scale 0, at 3
+May 29 19:20:34.385: INFO: POD   NODE                                            PHASE    GRACE  CONDITIONS
+May 29 19:20:34.385: INFO: ss-0  scw-sono13-default-71171af685174eada6c25c1541e  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  }]
+May 29 19:20:34.385: INFO: ss-1  scw-sono13-default-2865dd8133304358ae8da697bb2  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:34.385: INFO: ss-2  scw-sono13-default-71171af685174eada6c25c1541e  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:34.386: INFO: 
+May 29 19:20:34.386: INFO: StatefulSet ss has not reached scale 0, at 3
+May 29 19:20:35.394: INFO: POD   NODE                                            PHASE    GRACE  CONDITIONS
+May 29 19:20:35.394: INFO: ss-0  scw-sono13-default-71171af685174eada6c25c1541e  Pending  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:19:51 +0000 UTC  }]
+May 29 19:20:35.394: INFO: ss-1  scw-sono13-default-2865dd8133304358ae8da697bb2  Pending  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:35.394: INFO: ss-2  scw-sono13-default-71171af685174eada6c25c1541e  Pending  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:20:11 +0000 UTC  }]
+May 29 19:20:35.394: INFO: 
+May 29 19:20:35.394: INFO: StatefulSet ss has not reached scale 0, at 3
+May 29 19:20:36.403: INFO: Verifying statefulset ss doesn't scale past 0 for another 6.973496687s
+May 29 19:20:37.411: INFO: Verifying statefulset ss doesn't scale past 0 for another 5.964657847s
+May 29 19:20:38.419: INFO: Verifying statefulset ss doesn't scale past 0 for another 4.956868862s
+May 29 19:20:39.429: INFO: Verifying statefulset ss doesn't scale past 0 for another 3.948471503s
+May 29 19:20:40.437: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.938706303s
+May 29 19:20:41.444: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.930991578s
+May 29 19:20:42.453: INFO: Verifying statefulset ss doesn't scale past 0 for another 923.384125ms
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-pd8gc
+May 29 19:20:43.468: INFO: Scaling statefulset ss to 0
+May 29 19:20:43.490: INFO: Waiting for statefulset status.replicas updated to 0
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+May 29 19:20:43.497: INFO: Deleting all statefulset in ns e2e-tests-statefulset-pd8gc
+May 29 19:20:43.504: INFO: Scaling statefulset ss to 0
+May 29 19:20:43.525: INFO: Waiting for statefulset status.replicas updated to 0
+May 29 19:20:43.532: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:20:43.555: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-statefulset-pd8gc" for this suite.
+May 29 19:20:49.599: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:20:49.659: INFO: namespace: e2e-tests-statefulset-pd8gc, resource: bindings, ignored listing per whitelist
+May 29 19:20:49.827: INFO: namespace e2e-tests-statefulset-pd8gc deletion completed in 6.264318452s
+
+• [SLOW TEST:59.010 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    Burst scaling should run to completion even with unhealthy pods [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:20:49.827: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename proxy
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-proxy-lbxlh
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:20:50.102: INFO: (0) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 11.701464ms)
+May 29 19:20:50.110: INFO: (1) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.19321ms)
+May 29 19:20:50.118: INFO: (2) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.258862ms)
+May 29 19:20:50.127: INFO: (3) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.586719ms)
+May 29 19:20:50.136: INFO: (4) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.833121ms)
+May 29 19:20:50.144: INFO: (5) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.051464ms)
+May 29 19:20:50.152: INFO: (6) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.348692ms)
+May 29 19:20:50.163: INFO: (7) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 10.903734ms)
+May 29 19:20:50.172: INFO: (8) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.431397ms)
+May 29 19:20:50.180: INFO: (9) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.909716ms)
+May 29 19:20:50.192: INFO: (10) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 11.739976ms)
+May 29 19:20:50.200: INFO: (11) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.151271ms)
+May 29 19:20:50.208: INFO: (12) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.975881ms)
+May 29 19:20:50.216: INFO: (13) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.185916ms)
+May 29 19:20:50.224: INFO: (14) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 8.33182ms)
+May 29 19:20:50.232: INFO: (15) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.420396ms)
+May 29 19:20:50.239: INFO: (16) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 7.369541ms)
+May 29 19:20:50.251: INFO: (17) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 12.057321ms)
+May 29 19:20:50.266: INFO: (18) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 14.372935ms)
+May 29 19:20:50.275: INFO: (19) /api/v1/nodes/scw-sono13-default-2865dd8133304358ae8da697bb2/proxy/logs/: 
+alternatives.log
+apt/
+... (200; 9.407011ms)
+[AfterEach] version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:20:50.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-proxy-lbxlh" for this suite.
+May 29 19:20:56.303: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:20:56.443: INFO: namespace: e2e-tests-proxy-lbxlh, resource: bindings, ignored listing per whitelist
+May 29 19:20:56.605: INFO: namespace e2e-tests-proxy-lbxlh deletion completed in 6.323181327s
+
+• [SLOW TEST:6.777 seconds]
+[sig-network] Proxy
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  version v1
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:56
+    should proxy logs on node using proxy subresource  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:20:56.605: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-srvxj
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May 29 19:20:56.935: INFO: PodSpec: initContainers in spec.initContainers
+May 29 19:21:40.218: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-e268dba2-8246-11e9-bd6e-667e8fbec69d", GenerateName:"", Namespace:"e2e-tests-init-container-srvxj", SelfLink:"/api/v1/namespaces/e2e-tests-init-container-srvxj/pods/pod-init-e268dba2-8246-11e9-bd6e-667e8fbec69d", UID:"e26a0231-8246-11e9-9b18-c2b4512ea1b9", ResourceVersion:"948990550", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63694754456, loc:(*time.Location)(0x7b33b80)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"935522231"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-trntx", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc001719000), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-trntx", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-trntx", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", 
MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-trntx", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc001f849d8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"scw-sono13-default-71171af685174eada6c25c1541e", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc000aa94a0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001f84a50)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc001f84a70)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc001f84a78), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc001f84a7c)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754456, loc:(*time.Location)(0x7b33b80)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, 
loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754456, loc:(*time.Location)(0x7b33b80)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754456, loc:(*time.Location)(0x7b33b80)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754456, loc:(*time.Location)(0x7b33b80)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.12.157.201", PodIP:"100.64.1.156", StartTime:(*v1.Time)(0xc000a46b60), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0014153b0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc001415420)}, Ready:false, RestartCount:3, Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://d0db2097d81641e67e1041d6b67ec20a28d421f9dcf0f3cfe4f18bd2f5788492"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000a46ba0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc000a46b80), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}}
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:21:40.219: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-srvxj" for this suite.
+May 29 19:22:02.267: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:22:02.427: INFO: namespace: e2e-tests-init-container-srvxj, resource: bindings, ignored listing per whitelist
+May 29 19:22:02.516: INFO: namespace e2e-tests-init-container-srvxj deletion completed in 22.278518145s
+
+• [SLOW TEST:65.912 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:22:02.517: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-mswfz
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Starting the proxy
+May 29 19:22:02.792: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-329215334 proxy --unix-socket=/tmp/kubectl-proxy-unix088903053/test'
+STEP: retrieving proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:22:02.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-mswfz" for this suite.
+May 29 19:22:08.896: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:22:09.012: INFO: namespace: e2e-tests-kubectl-mswfz, resource: bindings, ignored listing per whitelist
+May 29 19:22:09.132: INFO: namespace e2e-tests-kubectl-mswfz deletion completed in 6.259396109s
+
+• [SLOW TEST:6.616 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Proxy server
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support --unix-socket=/path  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:22:09.132: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-2br9h
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace e2e-tests-statefulset-2br9h
+[It] Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Looking for a node to schedule stateful set and pod
+STEP: Creating pod with conflicting port in namespace e2e-tests-statefulset-2br9h
+STEP: Creating statefulset with conflicting port in namespace e2e-tests-statefulset-2br9h
+STEP: Waiting until pod test-pod will start running in namespace e2e-tests-statefulset-2br9h
+STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace e2e-tests-statefulset-2br9h
+May 29 19:22:13.464: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-2br9h, name: ss-0, uid: 0fe5da2b-8247-11e9-9b18-c2b4512ea1b9, status phase: Pending. Waiting for statefulset controller to delete.
+May 29 19:22:13.838: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-2br9h, name: ss-0, uid: 0fe5da2b-8247-11e9-9b18-c2b4512ea1b9, status phase: Failed. Waiting for statefulset controller to delete.
+May 29 19:22:13.846: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-2br9h, name: ss-0, uid: 0fe5da2b-8247-11e9-9b18-c2b4512ea1b9, status phase: Failed. Waiting for statefulset controller to delete.
+May 29 19:22:13.851: INFO: Observed delete event for stateful pod ss-0 in namespace e2e-tests-statefulset-2br9h
+STEP: Removing pod with conflicting port in namespace e2e-tests-statefulset-2br9h
+STEP: Waiting when stateful pod ss-0 will be recreated in namespace e2e-tests-statefulset-2br9h and will be in running state
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+May 29 19:22:17.888: INFO: Deleting all statefulset in ns e2e-tests-statefulset-2br9h
+May 29 19:22:17.897: INFO: Scaling statefulset ss to 0
+May 29 19:22:27.936: INFO: Waiting for statefulset status.replicas updated to 0
+May 29 19:22:27.943: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:22:27.969: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-statefulset-2br9h" for this suite.
+May 29 19:22:36.000: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:22:36.261: INFO: namespace: e2e-tests-statefulset-2br9h, resource: bindings, ignored listing per whitelist
+May 29 19:22:36.327: INFO: namespace e2e-tests-statefulset-2br9h deletion completed in 8.348550803s
+
+• [SLOW TEST:27.194 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    Should recreate evicted statefulset [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:22:36.327: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-sgnk9
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: starting the proxy server
+May 29 19:22:36.597: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-329215334 proxy -p 0 --disable-filter'
+STEP: curling proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:22:36.704: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-sgnk9" for this suite.
+May 29 19:22:42.736: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:22:42.794: INFO: namespace: e2e-tests-kubectl-sgnk9, resource: bindings, ignored listing per whitelist
+May 29 19:22:42.965: INFO: namespace e2e-tests-kubectl-sgnk9 deletion completed in 6.251194896s
+
+• [SLOW TEST:6.638 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Proxy server
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support proxy with --port 0  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:22:42.965: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-gcj7q
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:22:43.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-gcj7q" for this suite.
+May 29 19:23:05.275: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:23:05.385: INFO: namespace: e2e-tests-kubelet-test-gcj7q, resource: bindings, ignored listing per whitelist
+May 29 19:23:05.509: INFO: namespace e2e-tests-kubelet-test-gcj7q deletion completed in 22.255287237s
+
+• [SLOW TEST:22.544 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should be possible to delete [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:23:05.510: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-vkgp6
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295
+[It] should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a replication controller
+May 29 19:23:05.825: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:06.626: INFO: stderr: ""
+May 29 19:23:06.626: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:23:06.626: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:06.760: INFO: stderr: ""
+May 29 19:23:06.760: INFO: stdout: "update-demo-nautilus-5x65z update-demo-nautilus-dfxdm "
+May 29 19:23:06.760: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-5x65z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:06.858: INFO: stderr: ""
+May 29 19:23:06.858: INFO: stdout: ""
+May 29 19:23:06.858: INFO: update-demo-nautilus-5x65z is created but not running
+May 29 19:23:11.858: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:11.982: INFO: stderr: ""
+May 29 19:23:11.982: INFO: stdout: "update-demo-nautilus-5x65z update-demo-nautilus-dfxdm "
+May 29 19:23:11.982: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-5x65z -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.089: INFO: stderr: ""
+May 29 19:23:12.089: INFO: stdout: "true"
+May 29 19:23:12.089: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-5x65z -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.200: INFO: stderr: ""
+May 29 19:23:12.201: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:23:12.201: INFO: validating pod update-demo-nautilus-5x65z
+May 29 19:23:12.295: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:23:12.295: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:23:12.295: INFO: update-demo-nautilus-5x65z is verified up and running
+May 29 19:23:12.295: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-dfxdm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.422: INFO: stderr: ""
+May 29 19:23:12.422: INFO: stdout: "true"
+May 29 19:23:12.422: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-dfxdm -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.533: INFO: stderr: ""
+May 29 19:23:12.533: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:23:12.533: INFO: validating pod update-demo-nautilus-dfxdm
+May 29 19:23:12.627: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:23:12.627: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:23:12.627: INFO: update-demo-nautilus-dfxdm is verified up and running
+STEP: using delete to clean up resources
+May 29 19:23:12.627: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.759: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:23:12.759: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+May 29 19:23:12.760: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-vkgp6'
+May 29 19:23:12.904: INFO: stderr: "No resources found.\n"
+May 29 19:23:12.904: INFO: stdout: ""
+May 29 19:23:12.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -l name=update-demo --namespace=e2e-tests-kubectl-vkgp6 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+May 29 19:23:13.060: INFO: stderr: ""
+May 29 19:23:13.060: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:23:13.060: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-vkgp6" for this suite.
+May 29 19:23:35.099: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:23:35.292: INFO: namespace: e2e-tests-kubectl-vkgp6, resource: bindings, ignored listing per whitelist
+May 29 19:23:35.392: INFO: namespace e2e-tests-kubectl-vkgp6 deletion completed in 22.322017647s
+
+• [SLOW TEST:29.882 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create and stop a replication controller  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Downward API volume 
+  should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:23:35.393: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-s46kg
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 19:23:35.711: INFO: Waiting up to 5m0s for pod "downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-s46kg" to be "success or failure"
+May 29 19:23:35.718: INFO: Pod "downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.702848ms
+May 29 19:23:37.728: INFO: Pod "downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016926258s
+May 29 19:23:39.736: INFO: Pod "downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024748142s
+STEP: Saw pod success
+May 29 19:23:39.736: INFO: Pod "downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:23:39.743: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 19:23:39.775: INFO: Waiting for pod downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:23:39.781: INFO: Pod downwardapi-volume-410a5ac0-8247-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:23:39.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-s46kg" for this suite.
+May 29 19:23:45.823: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:23:45.966: INFO: namespace: e2e-tests-downward-api-s46kg, resource: bindings, ignored listing per whitelist
+May 29 19:23:46.130: INFO: namespace e2e-tests-downward-api-s46kg deletion completed in 6.340166003s
+
+• [SLOW TEST:10.737 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set mode on item file [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Pods 
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:23:46.130: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-n2cnb
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+STEP: setting up watch
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: verifying pod creation was observed
+May 29 19:23:50.464: INFO: running pod: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-submit-remove-476df33e-8247-11e9-bd6e-667e8fbec69d", GenerateName:"", Namespace:"e2e-tests-pods-n2cnb", SelfLink:"/api/v1/namespaces/e2e-tests-pods-n2cnb/pods/pod-submit-remove-476df33e-8247-11e9-bd6e-667e8fbec69d", UID:"47707a95-8247-11e9-9b18-c2b4512ea1b9", ResourceVersion:"949000526", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63694754626, loc:(*time.Location)(0x7b33b80)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"418779436"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-vjrvg", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc002699780), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"nginx", Image:"docker.io/library/nginx:1.14-alpine", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-vjrvg", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0023d4b88), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"scw-sono13-default-71171af685174eada6c25c1541e", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), 
SecurityContext:(*v1.PodSecurityContext)(0xc002103320), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0023d4bc0)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0023d4d20)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc0023d4d28), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc0023d4d2c)}, Status:v1.PodStatus{Phase:"Running", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754626, loc:(*time.Location)(0x7b33b80)}}, Reason:"", Message:""}, v1.PodCondition{Type:"Ready", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754628, loc:(*time.Location)(0x7b33b80)}}, Reason:"", Message:""}, v1.PodCondition{Type:"ContainersReady", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754628, loc:(*time.Location)(0x7b33b80)}}, Reason:"", Message:""}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63694754626, loc:(*time.Location)(0x7b33b80)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.12.157.201", PodIP:"100.64.1.159", StartTime:(*v1.Time)(0xc0016fd2c0), InitContainerStatuses:[]v1.ContainerStatus(nil), ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"nginx", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(0xc0016fd2e0), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:true, RestartCount:0, Image:"nginx:1.14-alpine", ImageID:"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7", ContainerID:"docker://cafe6eb89e017b0e3feb002fbbf8f901d96646221a5f91d71e4f53461a77ebd6"}}, QOSClass:"BestEffort"}}
+STEP: deleting the pod gracefully
+STEP: verifying the kubelet observed the termination notice
+STEP: verifying pod deletion was observed
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:24:04.021: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-n2cnb" for this suite.
+May 29 19:24:10.051: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:24:10.267: INFO: namespace: e2e-tests-pods-n2cnb, resource: bindings, ignored listing per whitelist
+May 29 19:24:10.279: INFO: namespace e2e-tests-pods-n2cnb deletion completed in 6.249526181s
+
+• [SLOW TEST:24.149 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:24:10.279: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-pcccb
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-pcccb
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+May 29 19:24:10.567: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+May 29 19:24:34.720: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.64.0.80:8080/dial?request=hostName&protocol=udp&host=100.64.1.160&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-pcccb PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:24:34.720: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:24:34.862: INFO: Waiting for endpoints: map[]
+May 29 19:24:34.868: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://100.64.0.80:8080/dial?request=hostName&protocol=udp&host=100.64.0.79&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-pcccb PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:24:34.868: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:24:35.002: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:24:35.003: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pod-network-test-pcccb" for this suite.
+May 29 19:24:57.042: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:24:57.173: INFO: namespace: e2e-tests-pod-network-test-pcccb, resource: bindings, ignored listing per whitelist
+May 29 19:24:57.286: INFO: namespace e2e-tests-pod-network-test-pcccb deletion completed in 22.274478174s
+
+• [SLOW TEST:47.007 seconds]
+[sig-network] Networking
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: udp [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl describe 
+  should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:24:57.287: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-5ldgm
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:24:57.570: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 version --client'
+May 29 19:24:57.655: INFO: stderr: ""
+May 29 19:24:57.655: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.0\", GitCommit:\"ddf47ac13c1a9483ea035a79cd7c10005ff21a6d\", GitTreeState:\"clean\", BuildDate:\"2018-12-03T21:04:45Z\", GoVersion:\"go1.11.2\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+May 29 19:24:57.659: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-5ldgm'
+May 29 19:24:57.938: INFO: stderr: ""
+May 29 19:24:57.939: INFO: stdout: "replicationcontroller/redis-master created\n"
+May 29 19:24:57.939: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-5ldgm'
+May 29 19:24:58.201: INFO: stderr: ""
+May 29 19:24:58.201: INFO: stdout: "service/redis-master created\n"
+STEP: Waiting for Redis master to start.
+May 29 19:24:59.211: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:24:59.211: INFO: Found 0 / 1
+May 29 19:25:00.209: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:25:00.209: INFO: Found 1 / 1
+May 29 19:25:00.209: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+May 29 19:25:00.217: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:25:00.217: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+May 29 19:25:00.217: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 describe pod redis-master-gpdlb --namespace=e2e-tests-kubectl-5ldgm'
+May 29 19:25:00.365: INFO: stderr: ""
+May 29 19:25:00.365: INFO: stdout: "Name:               redis-master-gpdlb\nNamespace:          e2e-tests-kubectl-5ldgm\nPriority:           0\nPriorityClassName:  \nNode:               scw-sono13-default-71171af685174eada6c25c1541e/10.12.157.201\nStart Time:         Wed, 29 May 2019 19:24:57 +0000\nLabels:             app=redis\n                    role=master\nAnnotations:        \nStatus:             Running\nIP:                 100.64.1.161\nControlled By:      ReplicationController/redis-master\nContainers:\n  redis-master:\n    Container ID:   docker://b7a3e65058635621a9415a8fe7640d17711806ffee09642d1bd86f9f6c2e335e\n    Image:          gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Image ID:       docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830\n    Port:           6379/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Wed, 29 May 2019 19:24:59 +0000\n    Ready:          True\n    Restart Count:  0\n    Environment:    \n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bxfw8 (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             True \n  ContainersReady   True \n  PodScheduled      True \nVolumes:\n  default-token-bxfw8:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-bxfw8\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  \nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age   From                                                     Message\n  ----    ------     ----  ----                                                     -------\n  Normal  Scheduled  3s    default-scheduler                                        Successfully assigned e2e-tests-kubectl-5ldgm/redis-master-gpdlb to scw-sono13-default-71171af685174eada6c25c1541e\n  Normal  Pulled     1s    kubelet, scw-sono13-default-71171af685174eada6c25c1541e  Container image \"gcr.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n  Normal  Created    1s    kubelet, scw-sono13-default-71171af685174eada6c25c1541e  Created container\n  Normal  Started    1s    kubelet, scw-sono13-default-71171af685174eada6c25c1541e  Started container\n"
+May 29 19:25:00.365: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 describe rc redis-master --namespace=e2e-tests-kubectl-5ldgm'
+May 29 19:25:00.540: INFO: stderr: ""
+May 29 19:25:00.540: INFO: stdout: "Name:         redis-master\nNamespace:    e2e-tests-kubectl-5ldgm\nSelector:     app=redis,role=master\nLabels:       app=redis\n              role=master\nAnnotations:  \nReplicas:     1 current / 1 desired\nPods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=redis\n           role=master\n  Containers:\n   redis-master:\n    Image:        gcr.io/kubernetes-e2e-test-images/redis:1.0\n    Port:         6379/TCP\n    Host Port:    0/TCP\n    Environment:  \n    Mounts:       \n  Volumes:        \nEvents:\n  Type    Reason            Age   From                    Message\n  ----    ------            ----  ----                    -------\n  Normal  SuccessfulCreate  3s    replication-controller  Created pod: redis-master-gpdlb\n"
+May 29 19:25:00.540: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 describe service redis-master --namespace=e2e-tests-kubectl-5ldgm'
+May 29 19:25:00.688: INFO: stderr: ""
+May 29 19:25:00.688: INFO: stdout: "Name:              redis-master\nNamespace:         e2e-tests-kubectl-5ldgm\nLabels:            app=redis\n                   role=master\nAnnotations:       \nSelector:          app=redis,role=master\nType:              ClusterIP\nIP:                10.44.99.141\nPort:                6379/TCP\nTargetPort:        redis-server/TCP\nEndpoints:         100.64.1.161:6379\nSession Affinity:  None\nEvents:            \n"
+May 29 19:25:00.702: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 describe node scw-sono13-default-2865dd8133304358ae8da697bb2'
+May 29 19:25:00.859: INFO: stderr: ""
+May 29 19:25:00.859: INFO: stdout: "Name:               scw-sono13-default-2865dd8133304358ae8da697bb2\nRoles:              \nLabels:             beta.kubernetes.io/arch=amd64\n                    beta.kubernetes.io/instance-type=GP1-XS\n                    beta.kubernetes.io/os=linux\n                    cloud.scaleway.com/scw-clusterid=73f460ed-59ba-40b2-9459-8137fe90e4ec\n                    cloud.scaleway.com/scw-clustername=sono13\n                    cloud.scaleway.com/scw-cniname=flannel\n                    cloud.scaleway.com/scw-nodeid=2865dd81-3330-4358-ae8d-a697bb2621dd\n                    cloud.scaleway.com/scw-nodename=scw-sono13-default-2865dd8133304358ae8da697bb2\n                    cloud.scaleway.com/scw-poolid=1172a9c3-a898-4127-89d0-3e4a31d62d12\n                    cloud.scaleway.com/scw-poolname=default\n                    failure-domain.beta.kubernetes.io/region=par1\n                    failure-domain.beta.kubernetes.io/zone=14-3-402\n                    kubernetes.io/hostname=scw-sono13-default-2865dd8133304358ae8da697bb2\nAnnotations:        flannel.alpha.coreos.com/backend-data: {\"VtepMAC\":\"6e:3d:84:8c:1e:d2\"}\n                    flannel.alpha.coreos.com/backend-type: vxlan\n                    flannel.alpha.coreos.com/kube-subnet-manager: true\n                    flannel.alpha.coreos.com/public-ip: 10.12.149.215\n                    node.alpha.kubernetes.io/ttl: 0\n                    volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp:  Wed, 29 May 2019 18:14:10 +0000\nTaints:             \nUnschedulable:      false\nConditions:\n  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message\n  ----             ------  -----------------                 ------------------                ------                       -------\n  KernelDeadlock   False   Wed, 29 May 2019 19:24:24 +0000   Wed, 29 May 2019 18:14:41 +0000   KernelHasNoDeadlock          kernel has no deadlock\n  MemoryPressure   False   Wed, 29 May 2019 19:24:51 +0000   Wed, 29 May 2019 18:14:10 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available\n  DiskPressure     False   Wed, 29 May 2019 19:24:51 +0000   Wed, 29 May 2019 18:14:10 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure\n  PIDPressure      False   Wed, 29 May 2019 19:24:51 +0000   Wed, 29 May 2019 18:14:10 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available\n  Ready            True    Wed, 29 May 2019 19:24:51 +0000   Wed, 29 May 2019 18:14:30 +0000   KubeletReady                 kubelet is posting ready status. 
AppArmor enabled\nAddresses:\n  Hostname:    scw-sono13-default-2865dd8133304358ae8da697bb2\n  InternalIP:  10.12.149.215\n  ExternalIP:  212.47.254.30\nCapacity:\n cpu:                4\n ephemeral-storage:  143957176Ki\n hugepages-1Gi:      0\n hugepages-2Mi:      0\n memory:             16421992Ki\n pods:               110\nAllocatable:\n cpu:                3800m\n ephemeral-storage:  133471416Ki\n hugepages-1Gi:      0\n hugepages-2Mi:      0\n memory:             15373416Ki\n pods:               110\nSystem Info:\n Machine ID:                 fa1e4ed873c845588d4d98ed26f5013c\n System UUID:                fa1e4ed873c845588d4d98ed26f5013c\n Boot ID:                    c8ff2c5e-3f1c-4978-86a8-91517546f5b4\n Kernel Version:             4.15.0-50-generic\n OS Image:                   Ubuntu 18.04.2 LTS\n Operating System:           linux\n Architecture:               amd64\n Container Runtime Version:  docker://18.6.3\n Kubelet Version:            v1.13.1\n Kube-Proxy Version:         v1.13.1\nPodCIDR:                     100.64.0.0/24\nProviderID:                  scaleway://1989ee5c-a638-4923-8a1f-53f4b8e4987b\nNon-terminated Pods:         (9 in total)\n  Namespace                  Name                                                       CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE\n  ---------                  ----                                                       ------------  ----------  ---------------  -------------  ---\n  heptio-sonobuoy            sonobuoy-systemd-logs-daemon-set-537397329e444263-krxc2    0 (0%)        0 (0%)      0 (0%)           0 (0%)         69m\n  kube-system                coredns-59b5b6c955-ssfxs                                   100m (2%)     0 (0%)      70Mi (0%)        170Mi (1%)     72m\n  kube-system                flannel-nnv2c                                              100m (2%)     100m (2%)   50Mi (0%)        50Mi (0%)      70m\n  kube-system                heapster-d8d4579b6-fzrlt                                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         72m\n  kube-system                kube-proxy-s4qs6                                           0 (0%)        0 (0%)      0 (0%)           0 (0%)         70m\n  kube-system                kubernetes-dashboard-794fb6974c-d7btd                      0 (0%)        0 (0%)      0 (0%)           0 (0%)         72m\n  kube-system                metrics-server-794596bd9d-x9dz9                            0 (0%)        0 (0%)      0 (0%)           0 (0%)         72m\n  kube-system                monitoring-influxdb-7c84bfcfc8-snwmn                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         72m\n  kube-system                node-problem-detector-6bkln                                20m (0%)      200m (5%)   20Mi (0%)        100Mi (0%)     70m\nAllocated resources:\n  (Total limits may be over 100 percent, i.e., overcommitted.)\n  Resource           Requests    Limits\n  --------           --------    ------\n  cpu                220m (5%)   300m (7%)\n  memory             140Mi (0%)  320Mi (2%)\n  ephemeral-storage  0 (0%)      0 (0%)\nEvents:              \n"
+May 29 19:25:00.863: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 describe namespace e2e-tests-kubectl-5ldgm'
+May 29 19:25:01.009: INFO: stderr: ""
+May 29 19:25:01.009: INFO: stdout: "Name:         e2e-tests-kubectl-5ldgm\nLabels:       e2e-framework=kubectl\n              e2e-run=bd439626-823d-11e9-bd6e-667e8fbec69d\nAnnotations:  \nStatus:       Active\n\nNo resource quota.\n\nNo resource limits.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:25:01.009: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-5ldgm" for this suite.
+May 29 19:25:23.040: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:25:23.117: INFO: namespace: e2e-tests-kubectl-5ldgm, resource: bindings, ignored listing per whitelist
+May 29 19:25:23.316: INFO: namespace e2e-tests-kubectl-5ldgm deletion completed in 22.298182757s
+
+• [SLOW TEST:26.029 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl describe
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should check if kubectl describe prints relevant information for rc and pods  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:25:23.316: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename watch
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-watch-rv8dk
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a watch on configmaps with a certain label
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: changing the label value of the configmap
+STEP: Expecting to observe a delete notification for the watched object
+May 29 19:25:23.664: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949007843,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},}
+May 29 19:25:23.664: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949007844,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+May 29 19:25:23.664: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949007846,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time
+STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements
+STEP: changing the label value of the configmap back
+STEP: modifying the configmap a third time
+STEP: deleting the configmap
+STEP: Expecting to observe an add notification for the watched object when the label value was restored
+May 29 19:25:33.724: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949008580,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+May 29 19:25:33.724: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949008583,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+May 29 19:25:33.724: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-rv8dk,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv8dk/configmaps/e2e-watch-test-label-changed,UID:815f8eab-8247-11e9-9b18-c2b4512ea1b9,ResourceVersion:949008585,Generation:0,CreationTimestamp:2019-05-29 19:25:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:25:33.724: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-watch-rv8dk" for this suite.
+May 29 19:25:39.754: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:25:39.959: INFO: namespace: e2e-tests-watch-rv8dk, resource: bindings, ignored listing per whitelist
+May 29 19:25:39.999: INFO: namespace e2e-tests-watch-rv8dk deletion completed in 6.266177902s
+
+• [SLOW TEST:16.684 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:25:40.000: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename statefulset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-statefulset-tlm74
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace e2e-tests-statefulset-tlm74
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a new StatefulSet
+May 29 19:25:40.277: INFO: Found 0 stateful pods, waiting for 3
+May 29 19:25:50.294: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+May 29 19:25:50.294: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+May 29 19:25:50.294: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+May 29 19:25:50.318: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-tlm74 ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:25:50.586: INFO: stderr: ""
+May 29 19:25:50.586: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:25:50.586: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+May 29 19:26:00.645: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+May 29 19:26:10.692: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-tlm74 ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May 29 19:26:10.950: INFO: stderr: ""
+May 29 19:26:10.950: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May 29 19:26:10.950: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May 29 19:26:21.000: INFO: Waiting for StatefulSet e2e-tests-statefulset-tlm74/ss2 to complete update
+May 29 19:26:21.000: INFO: Waiting for Pod e2e-tests-statefulset-tlm74/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+May 29 19:26:21.000: INFO: Waiting for Pod e2e-tests-statefulset-tlm74/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666
+May 29 19:26:21.000: INFO: Waiting for Pod e2e-tests-statefulset-tlm74/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666
+May 29 19:26:31.034: INFO: Waiting for StatefulSet e2e-tests-statefulset-tlm74/ss2 to complete update
+May 29 19:26:31.034: INFO: Waiting for Pod e2e-tests-statefulset-tlm74/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+STEP: Rolling back to a previous revision
+May 29 19:26:41.015: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-tlm74 ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May 29 19:26:41.258: INFO: stderr: ""
+May 29 19:26:41.258: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May 29 19:26:41.258: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May 29 19:26:41.304: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+May 29 19:26:51.349: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 exec --namespace=e2e-tests-statefulset-tlm74 ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May 29 19:26:51.586: INFO: stderr: ""
+May 29 19:26:51.586: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May 29 19:26:51.586: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May 29 19:27:11.638: INFO: Waiting for StatefulSet e2e-tests-statefulset-tlm74/ss2 to complete update
+May 29 19:27:11.638: INFO: Waiting for Pod e2e-tests-statefulset-tlm74/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+May 29 19:27:21.662: INFO: Deleting all statefulset in ns e2e-tests-statefulset-tlm74
+May 29 19:27:21.669: INFO: Scaling statefulset ss2 to 0
+May 29 19:27:31.706: INFO: Waiting for statefulset status.replicas updated to 0
+May 29 19:27:31.712: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:27:31.735: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-statefulset-tlm74" for this suite.
+May 29 19:27:39.765: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:27:40.006: INFO: namespace: e2e-tests-statefulset-tlm74, resource: bindings, ignored listing per whitelist
+May 29 19:27:40.068: INFO: namespace e2e-tests-statefulset-tlm74 deletion completed in 8.323422753s
+
+• [SLOW TEST:120.068 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:27:40.068: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-f6zn9
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+May 29 19:27:40.348: INFO: Waiting up to 5m0s for pod "pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-f6zn9" to be "success or failure"
+May 29 19:27:40.354: INFO: Pod "pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.492102ms
+May 29 19:27:42.368: INFO: Pod "pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020148993s
+May 29 19:27:44.376: INFO: Pod "pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027652789s
+STEP: Saw pod success
+May 29 19:27:44.376: INFO: Pod "pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:27:44.382: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:27:44.409: INFO: Waiting for pod pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:27:44.414: INFO: Pod pod-d2db1c2c-8247-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:27:44.414: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-f6zn9" for this suite.
+May 29 19:27:50.443: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:27:50.706: INFO: namespace: e2e-tests-emptydir-f6zn9, resource: bindings, ignored listing per whitelist
+May 29 19:27:50.720: INFO: namespace e2e-tests-emptydir-f6zn9 deletion completed in 6.298263556s
+
+• [SLOW TEST:10.651 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Kubelet when scheduling a read only busybox container 
+  should not write to root filesystem [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:27:50.720: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubelet-test-flq8z
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should not write to root filesystem [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:27:53.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-flq8z" for this suite.
+May 29 19:28:31.109: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:28:31.156: INFO: namespace: e2e-tests-kubelet-test-flq8z, resource: bindings, ignored listing per whitelist
+May 29 19:28:31.328: INFO: namespace e2e-tests-kubelet-test-flq8z deletion completed in 38.241104344s
+
+• [SLOW TEST:40.609 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a read only busybox container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:186
+    should not write to root filesystem [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:28:31.329: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename containers
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-containers-85xq4
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test override command
+May 29 19:28:31.612: INFO: Waiting up to 5m0s for pod "client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-containers-85xq4" to be "success or failure"
+May 29 19:28:31.617: INFO: Pod "client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.645901ms
+May 29 19:28:33.624: INFO: Pod "client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012189423s
+May 29 19:28:35.647: INFO: Pod "client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.035386104s
+STEP: Saw pod success
+May 29 19:28:35.648: INFO: Pod "client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:28:35.654: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:28:35.680: INFO: Waiting for pod client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:28:35.685: INFO: Pod client-containers-f169557f-8247-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:28:35.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-85xq4" for this suite.
+May 29 19:28:41.713: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:28:41.846: INFO: namespace: e2e-tests-containers-85xq4, resource: bindings, ignored listing per whitelist
+May 29 19:28:41.942: INFO: namespace e2e-tests-containers-85xq4 deletion completed in 6.249153916s
+
+• [SLOW TEST:10.613 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:28:41.942: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-5k4rq
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-map-f7bde240-8247-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:28:42.240: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-5k4rq" to be "success or failure"
+May 29 19:28:42.251: INFO: Pod "pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 10.642923ms
+May 29 19:28:44.257: INFO: Pod "pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017146398s
+May 29 19:28:46.264: INFO: Pod "pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.024032731s
+STEP: Saw pod success
+May 29 19:28:46.264: INFO: Pod "pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:28:46.270: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: 
+STEP: delete the pod
+May 29 19:28:46.301: INFO: Waiting for pod pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:28:46.307: INFO: Pod pod-projected-configmaps-f7befe6f-8247-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:28:46.307: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-5k4rq" for this suite.
+May 29 19:28:52.340: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:28:52.513: INFO: namespace: e2e-tests-projected-5k4rq, resource: bindings, ignored listing per whitelist
+May 29 19:28:52.630: INFO: namespace e2e-tests-projected-5k4rq deletion completed in 6.315325748s
+
+• [SLOW TEST:10.689 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[k8s.io] [sig-node] PreStop 
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:28:52.631: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename prestop
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-prestop-gkrsr
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating server pod server in namespace e2e-tests-prestop-gkrsr
+STEP: Waiting for pods to come up.
+STEP: Creating tester pod tester in namespace e2e-tests-prestop-gkrsr
+STEP: Deleting pre-stop pod
+May 29 19:29:06.075: INFO: Saw: {
+	"Hostname": "server",
+	"Sent": null,
+	"Received": {
+		"prestop": 1
+	},
+	"Errors": null,
+	"Log": [
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.",
+		"default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up."
+	],
+	"StillContactingPeers": true
+}
+STEP: Deleting the server pod
+[AfterEach] [k8s.io] [sig-node] PreStop
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:29:06.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-prestop-gkrsr" for this suite.
+May 29 19:29:44.113: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:29:44.207: INFO: namespace: e2e-tests-prestop-gkrsr, resource: bindings, ignored listing per whitelist
+May 29 19:29:44.430: INFO: namespace e2e-tests-prestop-gkrsr deletion completed in 38.338324488s
+
+• [SLOW TEST:51.799 seconds]
+[k8s.io] [sig-node] PreStop
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should call prestop when killing a pod  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-node] Downward API 
+  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:29:44.430: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-rr5r5
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May 29 19:29:44.714: INFO: Waiting up to 5m0s for pod "downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-rr5r5" to be "success or failure"
+May 29 19:29:44.720: INFO: Pod "downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.833754ms
+May 29 19:29:46.728: INFO: Pod "downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013629668s
+May 29 19:29:48.736: INFO: Pod "downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021830261s
+STEP: Saw pod success
+May 29 19:29:48.736: INFO: Pod "downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:29:48.744: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d container dapi-container: 
+STEP: delete the pod
+May 29 19:29:48.775: INFO: Waiting for pod downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:29:48.781: INFO: Pod downward-api-1cfbe536-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:29:48.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-rr5r5" for this suite.
+May 29 19:29:54.819: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:29:54.897: INFO: namespace: e2e-tests-downward-api-rr5r5, resource: bindings, ignored listing per whitelist
+May 29 19:29:55.108: INFO: namespace e2e-tests-downward-api-rr5r5 deletion completed in 6.318729865s
+
+• [SLOW TEST:10.678 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:29:55.109: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-qs8gs
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 19:29:55.428: INFO: Waiting up to 5m0s for pod "downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-qs8gs" to be "success or failure"
+May 29 19:29:55.437: INFO: Pod "downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 9.001949ms
+May 29 19:29:57.444: INFO: Pod "downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016080833s
+May 29 19:29:59.451: INFO: Pod "downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023728415s
+STEP: Saw pod success
+May 29 19:29:59.451: INFO: Pod "downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:29:59.458: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 19:29:59.486: INFO: Waiting for pod downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:29:59.492: INFO: Pod downwardapi-volume-235dfce0-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:29:59.492: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-qs8gs" for this suite.
+May 29 19:30:05.531: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:30:05.736: INFO: namespace: e2e-tests-projected-qs8gs, resource: bindings, ignored listing per whitelist
+May 29 19:30:05.854: INFO: namespace e2e-tests-projected-qs8gs deletion completed in 6.353541477s
+
+• [SLOW TEST:10.745 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:30:05.854: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-5pf7j
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:30:06.195: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"29c9efca-8248-11e9-9b18-c2b4512ea1b9", Controller:(*bool)(0xc0024495f2), BlockOwnerDeletion:(*bool)(0xc0024495f3)}}
+May 29 19:30:06.202: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"29c7b185-8248-11e9-9b18-c2b4512ea1b9", Controller:(*bool)(0xc00249f962), BlockOwnerDeletion:(*bool)(0xc00249f963)}}
+May 29 19:30:06.210: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"29c8d5b9-8248-11e9-9b18-c2b4512ea1b9", Controller:(*bool)(0xc00249fba2), BlockOwnerDeletion:(*bool)(0xc00249fba3)}}
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:30:11.226: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-5pf7j" for this suite.
+May 29 19:30:17.263: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:30:17.342: INFO: namespace: e2e-tests-gc-5pf7j, resource: bindings, ignored listing per whitelist
+May 29 19:30:17.518: INFO: namespace e2e-tests-gc-5pf7j deletion completed in 6.282808962s
+
+• [SLOW TEST:11.663 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:30:17.518: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-hgqlv
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May 29 19:30:17.793: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:30:21.103: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-hgqlv" for this suite.
+May 29 19:30:27.134: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:30:27.337: INFO: namespace: e2e-tests-init-container-hgqlv, resource: bindings, ignored listing per whitelist
+May 29 19:30:27.463: INFO: namespace e2e-tests-init-container-hgqlv deletion completed in 6.351358243s
+
+• [SLOW TEST:9.945 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:30:27.463: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename gc
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-gc-gfwn6
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for all pods to be garbage collected
+STEP: Gathering metrics
+W0529 19:30:37.809129      19 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May 29 19:30:37.809: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:30:37.809: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-gfwn6" for this suite.
+May 29 19:30:43.840: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:30:43.911: INFO: namespace: e2e-tests-gc-gfwn6, resource: bindings, ignored listing per whitelist
+May 29 19:30:44.203: INFO: namespace e2e-tests-gc-gfwn6 deletion completed in 6.387922337s
+
+• [SLOW TEST:16.740 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:30:44.204: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-lrdxw
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir volume type on tmpfs
+May 29 19:30:44.494: INFO: Waiting up to 5m0s for pod "pod-409da261-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-lrdxw" to be "success or failure"
+May 29 19:30:44.499: INFO: Pod "pod-409da261-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.667533ms
+May 29 19:30:46.506: INFO: Pod "pod-409da261-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011708118s
+May 29 19:30:48.520: INFO: Pod "pod-409da261-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.025920112s
+STEP: Saw pod success
+May 29 19:30:48.520: INFO: Pod "pod-409da261-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:30:48.527: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-409da261-8248-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:30:48.559: INFO: Waiting for pod pod-409da261-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:30:48.565: INFO: Pod pod-409da261-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:30:48.566: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-lrdxw" for this suite.
+May 29 19:30:54.597: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:30:54.807: INFO: namespace: e2e-tests-emptydir-lrdxw, resource: bindings, ignored listing per whitelist
+May 29 19:30:54.872: INFO: namespace e2e-tests-emptydir-lrdxw deletion completed in 6.297687138s
+
+• [SLOW TEST:10.668 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:30:54.872: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-2xfwl
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-46f8b4e3-8248-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:30:55.164: INFO: Waiting up to 5m0s for pod "pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-2xfwl" to be "success or failure"
+May 29 19:30:55.171: INFO: Pod "pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.248554ms
+May 29 19:30:57.179: INFO: Pod "pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013992679s
+May 29 19:30:59.194: INFO: Pod "pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029825759s
+STEP: Saw pod success
+May 29 19:30:59.195: INFO: Pod "pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:30:59.201: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d container secret-volume-test: 
+STEP: delete the pod
+May 29 19:30:59.229: INFO: Waiting for pod pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:30:59.234: INFO: Pod pod-secrets-46f9b6fc-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:30:59.234: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-2xfwl" for this suite.
+May 29 19:31:05.264: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:31:05.298: INFO: namespace: e2e-tests-secrets-2xfwl, resource: bindings, ignored listing per whitelist
+May 29 19:31:05.502: INFO: namespace e2e-tests-secrets-2xfwl deletion completed in 6.260250462s
+
+• [SLOW TEST:10.630 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should scale a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:31:05.502: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-ss8bh
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295
+[It] should scale a replication controller  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a replication controller
+May 29 19:31:05.762: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:06.030: INFO: stderr: ""
+May 29 19:31:06.030: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:31:06.030: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:06.167: INFO: stderr: ""
+May 29 19:31:06.167: INFO: stdout: "update-demo-nautilus-lpkjs update-demo-nautilus-zmr5m "
+May 29 19:31:06.167: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lpkjs -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:06.306: INFO: stderr: ""
+May 29 19:31:06.306: INFO: stdout: ""
+May 29 19:31:06.306: INFO: update-demo-nautilus-lpkjs is created but not running
+May 29 19:31:11.306: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:11.435: INFO: stderr: ""
+May 29 19:31:11.435: INFO: stdout: "update-demo-nautilus-lpkjs update-demo-nautilus-zmr5m "
+May 29 19:31:11.435: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lpkjs -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:11.558: INFO: stderr: ""
+May 29 19:31:11.558: INFO: stdout: "true"
+May 29 19:31:11.558: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-lpkjs -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:11.678: INFO: stderr: ""
+May 29 19:31:11.678: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:31:11.678: INFO: validating pod update-demo-nautilus-lpkjs
+May 29 19:31:11.774: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:31:11.774: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:31:11.774: INFO: update-demo-nautilus-lpkjs is verified up and running
+May 29 19:31:11.774: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:11.884: INFO: stderr: ""
+May 29 19:31:11.884: INFO: stdout: "true"
+May 29 19:31:11.884: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:12.005: INFO: stderr: ""
+May 29 19:31:12.005: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:31:12.005: INFO: validating pod update-demo-nautilus-zmr5m
+May 29 19:31:12.104: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:31:12.104: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:31:12.104: INFO: update-demo-nautilus-zmr5m is verified up and running
+STEP: scaling down the replication controller
+May 29 19:31:12.106: INFO: scanned /root for discovery docs: 
+May 29 19:31:12.106: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:13.285: INFO: stderr: ""
+May 29 19:31:13.285: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:31:13.285: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:13.414: INFO: stderr: ""
+May 29 19:31:13.414: INFO: stdout: "update-demo-nautilus-lpkjs update-demo-nautilus-zmr5m "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+May 29 19:31:18.414: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:18.565: INFO: stderr: ""
+May 29 19:31:18.565: INFO: stdout: "update-demo-nautilus-lpkjs update-demo-nautilus-zmr5m "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+May 29 19:31:23.565: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:23.709: INFO: stderr: ""
+May 29 19:31:23.709: INFO: stdout: "update-demo-nautilus-lpkjs update-demo-nautilus-zmr5m "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+May 29 19:31:28.710: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:28.831: INFO: stderr: ""
+May 29 19:31:28.831: INFO: stdout: "update-demo-nautilus-zmr5m "
+May 29 19:31:28.831: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:28.971: INFO: stderr: ""
+May 29 19:31:28.971: INFO: stdout: "true"
+May 29 19:31:28.971: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:29.095: INFO: stderr: ""
+May 29 19:31:29.095: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:31:29.095: INFO: validating pod update-demo-nautilus-zmr5m
+May 29 19:31:29.105: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:31:29.105: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:31:29.105: INFO: update-demo-nautilus-zmr5m is verified up and running
+STEP: scaling up the replication controller
+May 29 19:31:29.106: INFO: scanned /root for discovery docs: 
+May 29 19:31:29.106: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:30.281: INFO: stderr: ""
+May 29 19:31:30.281: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May 29 19:31:30.281: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:30.410: INFO: stderr: ""
+May 29 19:31:30.410: INFO: stdout: "update-demo-nautilus-vc9px update-demo-nautilus-zmr5m "
+May 29 19:31:30.410: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-vc9px -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:30.545: INFO: stderr: ""
+May 29 19:31:30.545: INFO: stdout: ""
+May 29 19:31:30.545: INFO: update-demo-nautilus-vc9px is created but not running
+May 29 19:31:35.546: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.045: INFO: stderr: ""
+May 29 19:31:36.045: INFO: stdout: "update-demo-nautilus-vc9px update-demo-nautilus-zmr5m "
+May 29 19:31:36.045: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-vc9px -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.152: INFO: stderr: ""
+May 29 19:31:36.152: INFO: stdout: "true"
+May 29 19:31:36.152: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-vc9px -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.283: INFO: stderr: ""
+May 29 19:31:36.283: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:31:36.283: INFO: validating pod update-demo-nautilus-vc9px
+May 29 19:31:36.383: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:31:36.383: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:31:36.383: INFO: update-demo-nautilus-vc9px is verified up and running
+May 29 19:31:36.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.534: INFO: stderr: ""
+May 29 19:31:36.534: INFO: stdout: "true"
+May 29 19:31:36.534: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods update-demo-nautilus-zmr5m -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.677: INFO: stderr: ""
+May 29 19:31:36.677: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May 29 19:31:36.677: INFO: validating pod update-demo-nautilus-zmr5m
+May 29 19:31:36.687: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May 29 19:31:36.687: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May 29 19:31:36.687: INFO: update-demo-nautilus-zmr5m is verified up and running
+STEP: using delete to clean up resources
+May 29 19:31:36.687: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.802: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:31:36.802: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+May 29 19:31:36.802: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-ss8bh'
+May 29 19:31:36.945: INFO: stderr: "No resources found.\n"
+May 29 19:31:36.945: INFO: stdout: ""
+May 29 19:31:36.945: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -l name=update-demo --namespace=e2e-tests-kubectl-ss8bh -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+May 29 19:31:37.072: INFO: stderr: ""
+May 29 19:31:37.072: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:31:37.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-ss8bh" for this suite.
+May 29 19:31:43.140: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:31:43.212: INFO: namespace: e2e-tests-kubectl-ss8bh, resource: bindings, ignored listing per whitelist
+May 29 19:31:43.447: INFO: namespace e2e-tests-kubectl-ss8bh deletion completed in 6.365146887s
+
+• [SLOW TEST:37.945 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Update Demo
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should scale a replication controller  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:31:43.448: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename daemonsets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-daemonsets-p4xkl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:31:43.764: INFO: Creating simple daemon set daemon-set
+STEP: Check that daemon pods launch on every node of the cluster.
+May 29 19:31:43.786: INFO: Number of nodes with available pods: 0
+May 29 19:31:43.786: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:31:44.802: INFO: Number of nodes with available pods: 0
+May 29 19:31:44.802: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:31:45.803: INFO: Number of nodes with available pods: 0
+May 29 19:31:45.803: INFO: Node scw-sono13-default-2865dd8133304358ae8da697bb2 is running more than one daemon pod
+May 29 19:31:46.802: INFO: Number of nodes with available pods: 2
+May 29 19:31:46.802: INFO: Number of running nodes: 2, number of available pods: 2
+STEP: Update daemon pods image.
+STEP: Check that daemon pods images are updated.
+May 29 19:31:46.841: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:46.841: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:47.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:47.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:48.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:48.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:49.864: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:49.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:50.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:50.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:51.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:51.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:52.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:52.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:53.861: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:53.861: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:54.858: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:54.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:55.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:55.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:56.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:56.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:57.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:57.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:58.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:58.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:59.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:31:59.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:00.864: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:00.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:01.864: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:01.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:02.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:02.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:03.859: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:03.859: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:04.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:04.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:05.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:05.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:06.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:06.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:07.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:07.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:08.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:08.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:09.855: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:09.855: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:10.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:10.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:11.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:11.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:12.858: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:12.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:13.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:13.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:14.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:14.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:15.864: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:15.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:16.859: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:16.859: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:17.855: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:17.855: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:18.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:18.856: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:18.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:19.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:19.857: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:19.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:20.859: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:20.859: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:20.859: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:21.865: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:21.866: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:21.866: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:22.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:22.857: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:22.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:23.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:23.857: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:23.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:24.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:24.857: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:24.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:25.864: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:25.865: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:25.865: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:26.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:26.856: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:26.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:27.858: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:27.858: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:27.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:28.856: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:28.856: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:28.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:29.857: INFO: Wrong image for pod: daemon-set-tlj2c. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:29.857: INFO: Pod daemon-set-tlj2c is not available
+May 29 19:32:29.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:30.856: INFO: Pod daemon-set-fv2tz is not available
+May 29 19:32:30.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:31.856: INFO: Pod daemon-set-fv2tz is not available
+May 29 19:32:31.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:32.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:33.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:34.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:35.923: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:36.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:37.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:38.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:39.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:40.855: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:41.855: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:42.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:43.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:44.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:45.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:46.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:47.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:48.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:49.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:50.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:51.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:52.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:53.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:54.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:55.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:56.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:57.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:58.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:32:59.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:00.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:01.858: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:02.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:02.856: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:03.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:03.856: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:04.864: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:04.864: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:05.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:05.857: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:06.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:06.857: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:07.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:07.857: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:08.857: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:08.857: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:09.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:09.856: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:10.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:10.856: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:11.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:11.856: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:12.856: INFO: Wrong image for pod: daemon-set-xrs4d. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1.
+May 29 19:33:12.857: INFO: Pod daemon-set-xrs4d is not available
+May 29 19:33:13.856: INFO: Pod daemon-set-dfb44 is not available
+STEP: Check that daemon pods are still running on every node of the cluster.
+May 29 19:33:13.881: INFO: Number of nodes with available pods: 1
+May 29 19:33:13.881: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod
+May 29 19:33:14.904: INFO: Number of nodes with available pods: 1
+May 29 19:33:14.904: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod
+May 29 19:33:15.902: INFO: Number of nodes with available pods: 1
+May 29 19:33:15.902: INFO: Node scw-sono13-default-71171af685174eada6c25c1541e is running more than one daemon pod
+May 29 19:33:16.898: INFO: Number of nodes with available pods: 2
+May 29 19:33:16.898: INFO: Number of running nodes: 2, number of available pods: 2
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-p4xkl, will wait for the garbage collector to delete the pods
+May 29 19:33:17.006: INFO: Deleting DaemonSet.extensions daemon-set took: 10.994804ms
+May 29 19:33:17.106: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.190284ms
+May 29 19:33:20.214: INFO: Number of nodes with available pods: 0
+May 29 19:33:20.214: INFO: Number of running nodes: 0, number of available pods: 0
+May 29 19:33:20.220: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-p4xkl/daemonsets","resourceVersion":"949044554"},"items":null}
+
+May 29 19:33:20.226: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-p4xkl/pods","resourceVersion":"949044554"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:33:20.245: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-p4xkl" for this suite.
+May 29 19:33:26.282: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:33:26.452: INFO: namespace: e2e-tests-daemonsets-p4xkl, resource: bindings, ignored listing per whitelist
+May 29 19:33:26.538: INFO: namespace e2e-tests-daemonsets-p4xkl deletion completed in 6.285629351s
+
+• [SLOW TEST:103.090 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl rolling-update 
+  should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:33:26.538: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-99chq
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1358
+[It] should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May 29 19:33:26.854: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:27.433: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May 29 19:33:27.433: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: rolling-update to same image controller
+May 29 19:33:27.444: INFO: scanned /root for discovery docs: 
+May 29 19:33:27.444: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:43.843: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+May 29 19:33:43.843: INFO: stdout: "Created e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414\nScaling up e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+May 29 19:33:43.843: INFO: stdout: "Created e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414\nScaling up e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up.
+May 29 19:33:43.843: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:43.991: INFO: stderr: ""
+May 29 19:33:43.991: INFO: stdout: "e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414-f75lj "
+May 29 19:33:43.991: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414-f75lj -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:44.136: INFO: stderr: ""
+May 29 19:33:44.136: INFO: stdout: "true"
+May 29 19:33:44.136: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414-f75lj -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:44.258: INFO: stderr: ""
+May 29 19:33:44.258: INFO: stdout: "docker.io/library/nginx:1.14-alpine"
+May 29 19:33:44.258: INFO: e2e-test-nginx-rc-29993bcc233d3a7ef7be0a94a27d8414-f75lj is verified up and running
+[AfterEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1364
+May 29 19:33:44.258: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-99chq'
+May 29 19:33:44.393: INFO: stderr: ""
+May 29 19:33:44.393: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:33:44.393: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-99chq" for this suite.
+May 29 19:34:06.437: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:34:06.485: INFO: namespace: e2e-tests-kubectl-99chq, resource: bindings, ignored listing per whitelist
+May 29 19:34:06.661: INFO: namespace e2e-tests-kubectl-99chq deletion completed in 22.258354437s
+
+• [SLOW TEST:40.123 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support rolling-update to same image  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:34:06.661: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-wgwcb
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-map-b94ef125-8248-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:34:06.991: INFO: Waiting up to 5m0s for pod "pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-wgwcb" to be "success or failure"
+May 29 19:34:06.997: INFO: Pod "pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.237244ms
+May 29 19:34:09.006: INFO: Pod "pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014664912s
+May 29 19:34:11.013: INFO: Pod "pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022202138s
+STEP: Saw pod success
+May 29 19:34:11.013: INFO: Pod "pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:34:11.020: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d container configmap-volume-test: 
+STEP: delete the pod
+May 29 19:34:11.055: INFO: Waiting for pod pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:34:11.063: INFO: Pod pod-configmaps-b95016ab-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:34:11.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-wgwcb" for this suite.
+May 29 19:34:17.093: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:34:17.241: INFO: namespace: e2e-tests-configmap-wgwcb, resource: bindings, ignored listing per whitelist
+May 29 19:34:17.392: INFO: namespace e2e-tests-configmap-wgwcb deletion completed in 6.320292733s
+
+• [SLOW TEST:10.732 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-apps] Deployment 
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:34:17.393: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename deployment
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-deployment-zmttc
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:34:17.680: INFO: Creating deployment "nginx-deployment"
+May 29 19:34:17.687: INFO: Waiting for observed generation 1
+May 29 19:34:19.700: INFO: Waiting for all required pods to come up
+May 29 19:34:19.712: INFO: Pod name nginx: Found 10 pods out of 10
+STEP: ensuring each pod is running
+May 29 19:34:21.737: INFO: Waiting for deployment "nginx-deployment" to complete
+May 29 19:34:21.753: INFO: Updating deployment "nginx-deployment" with a non-existent image
+May 29 19:34:21.765: INFO: Updating deployment nginx-deployment
+May 29 19:34:21.765: INFO: Waiting for observed generation 2
+May 29 19:34:23.789: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+May 29 19:34:23.795: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+May 29 19:34:23.805: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+May 29 19:34:23.826: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+May 29 19:34:23.826: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+May 29 19:34:23.832: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+May 29 19:34:23.844: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas
+May 29 19:34:23.844: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30
+May 29 19:34:23.858: INFO: Updating deployment nginx-deployment
+May 29 19:34:23.858: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas
+May 29 19:34:23.870: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+May 29 19:34:25.886: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May 29 19:34:25.898: INFO: Deployment "nginx-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:e2e-tests-deployment-zmttc,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zmttc/deployments/nginx-deployment,UID:bfb16e9b-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049833,Generation:3,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:33,UpdatedReplicas:13,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[{Available False 2019-05-29 19:34:23 +0000 UTC 2019-05-29 19:34:23 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-05-29 19:34:24 +0000 UTC 2019-05-29 19:34:17 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-65bbdb5f8" is progressing.}],ReadyReplicas:8,CollisionCount:nil,},}
+
+May 29 19:34:25.907: INFO: New ReplicaSet "nginx-deployment-65bbdb5f8" of Deployment "nginx-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8,GenerateName:,Namespace:e2e-tests-deployment-zmttc,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zmttc/replicasets/nginx-deployment-65bbdb5f8,UID:c2208a16-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049817,Generation:3,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment bfb16e9b-8248-11e9-9b18-c2b4512ea1b9 0xc001bca187 0xc001bca188}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:13,FullyLabeledReplicas:13,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May 29 19:34:25.907: INFO: All old ReplicaSets of Deployment "nginx-deployment":
+May 29 19:34:25.907: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965,GenerateName:,Namespace:e2e-tests-deployment-zmttc,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zmttc/replicasets/nginx-deployment-555b55d965,UID:bfb23f0b-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049813,Generation:3,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment bfb16e9b-8248-11e9-9b18-c2b4512ea1b9 0xc001bca0c7 0xc001bca0c8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:20,FullyLabeledReplicas:20,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},}
+May 29 19:34:25.920: INFO: Pod "nginx-deployment-555b55d965-4ck4c" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-4ck4c,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-4ck4c,UID:c36a7a87-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049986,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc002026fb7 0xc002026fb8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002027020} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002027040}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.920: INFO: Pod "nginx-deployment-555b55d965-64bfc" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-64bfc,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-64bfc,UID:c361d72e-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049771,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc002027f77 0xc002027f78}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002027fe0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72000}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.920: INFO: Pod "nginx-deployment-555b55d965-6bpjg" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-6bpjg,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-6bpjg,UID:bfb66a9c-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049453,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b720b7 0xc001b720b8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72120} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b721f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.189,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:20 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://3c347d82f8612e617a59ad8db6aa191c688c18c00954b32df671f3689b2bc32d}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.920: INFO: Pod "nginx-deployment-555b55d965-6lhkd" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-6lhkd,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-6lhkd,UID:c360c785-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049792,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b722b7 0xc001b722b8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72320} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72340}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.920: INFO: Pod "nginx-deployment-555b55d965-7wbph" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-7wbph,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-7wbph,UID:c361d22e-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049814,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72427 0xc001b72428}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b725a0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b725c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-975mn" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-975mn,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-975mn,UID:c36a5bfc-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049907,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72677 0xc001b72678}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b726e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72700}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-bq87b" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-bq87b,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-bq87b,UID:bfb69152-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049426,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72877 0xc001b72878}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b728e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72900}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:100.64.0.94,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://a9b5dc36a18350ed1ea3e03eabfeb2395dcf4964f9123f71abc4ea68ed8ec47e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-cx8wj" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-cx8wj,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-cx8wj,UID:c362d17e-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049838,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b729c7 0xc001b729c8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72a30} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72a50}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-hr7pv" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-hr7pv,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-hr7pv,UID:c377ff66-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049809,Generation:0,CreationTimestamp:2019-05-29 19:34:24 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72b37 0xc001b72b38}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72ba0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72bc0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-hvcnw" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-hvcnw,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-hvcnw,UID:bfb593b2-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049436,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72c47 0xc001b72c48}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72d20} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72d40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:100.64.0.92,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://5dc04ed7575141969868a95cb7ad5ee6a2743906a6a6baddc12545e5009f1236}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-j9tv6" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-j9tv6,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-j9tv6,UID:bfb3a6bd-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049335,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b72e27 0xc001b72e28}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b72ec0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b72ee0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.185,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://7cb9ce50a50e2b478e267398c3f238b415bfb8eb2f994db562d43f4170844340}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.921: INFO: Pod "nginx-deployment-555b55d965-jsxsg" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-jsxsg,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-jsxsg,UID:c362e431-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049795,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b73007 0xc001b73008}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b73080} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b730a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-kk8pk" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-kk8pk,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-kk8pk,UID:c362d857-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049855,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b73167 0xc001b73168}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b731d0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b731f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-l2x88" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-l2x88,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-l2x88,UID:c362e5d5-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049874,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b734d7 0xc001b734d8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b73540} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b73560}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-l6gj2" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-l6gj2,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-l6gj2,UID:bfb488d9-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049423,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b73617 0xc001b73618}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b739c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b73d10}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:100.64.0.90,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://d31f7ef930edb2c4ff0bc0f23364f858cc2dc564d688ed66772b97b4a7782c4e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-plgv8" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-plgv8,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-plgv8,UID:c36d1312-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949050023,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001b73f37 0xc001b73f38}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001b73fc0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001b73ff0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-pqf98" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-pqf98,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-pqf98,UID:bfb59acf-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049448,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001d480a7 0xc001d480a8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d48190} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d481b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:20 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.188,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:20 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://5bb5364eecee23fd139f050f8ba037865ffd4ea49d45d68a6ba7fd17df636f6f}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.922: INFO: Pod "nginx-deployment-555b55d965-vd657" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-vd657,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-vd657,UID:c36a7fb0-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049983,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001d48277 0xc001d48278}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d482e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d48310}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-555b55d965-wcj8p" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-wcj8p,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-wcj8p,UID:bfb669c3-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049329,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001d48507 0xc001d48508}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d48730} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d48750}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.187,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://f52a78e6fa5267d733a74c6a89d01034d15a79fb39bb6ed4eccf4c6aa52107d7}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-555b55d965-z9k4q" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-z9k4q,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-555b55d965-z9k4q,UID:bfb48c73-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049324,Generation:0,CreationTimestamp:2019-05-29 19:34:17 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 bfb23f0b-8248-11e9-9b18-c2b4512ea1b9 0xc001d48817 0xc001d48818}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d489c0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d489e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:19 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:17 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:100.64.1.186,StartTime:2019-05-29 19:34:17 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-29 19:34:19 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://05dc041120f817f843c7265bb1684dd85c5ee1aefddf7ae23e6ff1b47379b8c9}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-65bbdb5f8-2lnz2" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-2lnz2,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-2lnz2,UID:c22b736b-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049590,Generation:0,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d48ae7 0xc001d48ae8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d48be0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d48c00}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-65bbdb5f8-dxjpd" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-dxjpd,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-dxjpd,UID:c368f945-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049797,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d48cf7 0xc001d48cf8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d48d60} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d48d80}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-65bbdb5f8-lglz2" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-lglz2,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-lglz2,UID:c2215856-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049568,Generation:0,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d48f47 0xc001d48f48}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d48fb0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d48fd0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-65bbdb5f8-ljw6w" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-ljw6w,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-ljw6w,UID:c36a790e-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049942,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49127 0xc001d49128}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d491e0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d49200}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.923: INFO: Pod "nginx-deployment-65bbdb5f8-nsnrr" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-nsnrr,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-nsnrr,UID:c369309d-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049869,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49337 0xc001d49338}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d493a0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d493c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-prxjp" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-prxjp,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-prxjp,UID:c368fcae-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049910,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49487 0xc001d49488}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d49900} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d49920}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-rdmlv" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-rdmlv,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-rdmlv,UID:c368fe3d-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049947,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49aa7 0xc001d49aa8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d49b20} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d49b40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:24 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-v2pmj" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-v2pmj,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-v2pmj,UID:c3628f0b-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049794,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49cf7 0xc001d49cf8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d49d60} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d49d80}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-wb8tf" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-wb8tf,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-wb8tf,UID:c22aaaf6-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049588,Generation:0,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001d49e47 0xc001d49e48}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001d49fd0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001d49ff0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-wmngb" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-wmngb,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-wmngb,UID:c2224f81-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049567,Generation:0,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001ea8307 0xc001ea8308}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001ea8370} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001ea8400}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-wxzzd" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-wxzzd,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-wxzzd,UID:c2226524-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049581,Generation:0,CreationTimestamp:2019-05-29 19:34:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001ea8787 0xc001ea8788}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001ea87f0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001ea8810}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:21 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-xr44r" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-xr44r,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-xr44r,UID:c361a2b1-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049761,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001ea88d7 0xc001ea88d8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-71171af685174eada6c25c1541e,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001ea8a30} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001ea8a50}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.157.201,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May 29 19:34:25.924: INFO: Pod "nginx-deployment-65bbdb5f8-xs4v6" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-xs4v6,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-zmttc,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zmttc/pods/nginx-deployment-65bbdb5f8-xs4v6,UID:c36284a3-8248-11e9-9b18-c2b4512ea1b9,ResourceVersion:949049846,Generation:0,CreationTimestamp:2019-05-29 19:34:23 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 c2208a16-8248-11e9-9b18-c2b4512ea1b9 0xc001ea8b17 0xc001ea8b18}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-pthxt {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-pthxt,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-pthxt true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:scw-sono13-default-2865dd8133304358ae8da697bb2,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc001ea8e10} {node.kubernetes.io/unreachable Exists  NoExecute 0xc001ea8e30}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-29 19:34:23 +0000 UTC  }],Message:,Reason:,HostIP:10.12.149.215,PodIP:,StartTime:2019-05-29 19:34:23 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:34:25.924: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-zmttc" for this suite.
+May 29 19:34:33.967: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:34:33.989: INFO: namespace: e2e-tests-deployment-zmttc, resource: bindings, ignored listing per whitelist
+May 29 19:34:34.277: INFO: namespace e2e-tests-deployment-zmttc deletion completed in 8.339337351s
+
+• [SLOW TEST:16.883 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run rc 
+  should create an rc from an image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:34:34.277: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+E0529 19:34:34.519478      19 memcache.go:135] couldn't get resource list for metrics.k8s.io/v1beta1: the server is currently unable to handle the request
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-d88m2
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1298
+[It] should create an rc from an image  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May 29 19:34:35.233: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-d88m2'
+May 29 19:34:35.347: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May 29 19:34:35.347: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created
+STEP: confirm that you can get logs from an rc
+May 29 19:34:35.360: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-mtlw6]
+May 29 19:34:35.360: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-mtlw6" in namespace "e2e-tests-kubectl-d88m2" to be "running and ready"
+May 29 19:34:35.366: INFO: Pod "e2e-test-nginx-rc-mtlw6": Phase="Pending", Reason="", readiness=false. Elapsed: 5.707107ms
+May 29 19:34:37.373: INFO: Pod "e2e-test-nginx-rc-mtlw6": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012750541s
+May 29 19:34:39.380: INFO: Pod "e2e-test-nginx-rc-mtlw6": Phase="Running", Reason="", readiness=true. Elapsed: 4.019648874s
+May 29 19:34:39.380: INFO: Pod "e2e-test-nginx-rc-mtlw6" satisfied condition "running and ready"
+May 29 19:34:39.380: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [e2e-test-nginx-rc-mtlw6]
+May 29 19:34:39.380: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 logs rc/e2e-test-nginx-rc --namespace=e2e-tests-kubectl-d88m2'
+May 29 19:34:39.634: INFO: stderr: ""
+May 29 19:34:39.634: INFO: stdout: ""
+[AfterEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1303
+May 29 19:34:39.634: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-d88m2'
+May 29 19:34:39.795: INFO: stderr: ""
+May 29 19:34:39.795: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:34:39.796: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-d88m2" for this suite.
+May 29 19:34:45.854: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:34:46.126: INFO: namespace: e2e-tests-kubectl-d88m2, resource: bindings, ignored listing per whitelist
+May 29 19:34:46.140: INFO: namespace e2e-tests-kubectl-d88m2 deletion completed in 6.316808408s
+
+• [SLOW TEST:11.863 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run rc
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create an rc from an image  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:34:46.140: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-blvd5
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+May 29 19:34:46.474: INFO: Waiting up to 5m0s for pod "pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-blvd5" to be "success or failure"
+May 29 19:34:46.481: INFO: Pod "pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.496711ms
+May 29 19:34:48.487: INFO: Pod "pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013067417s
+May 29 19:34:50.495: INFO: Pod "pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020884244s
+STEP: Saw pod success
+May 29 19:34:50.495: INFO: Pod "pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:34:50.502: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:34:50.533: INFO: Waiting for pod pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:34:50.539: INFO: Pod pod-d0d8d7aa-8248-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:34:50.539: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-blvd5" for this suite.
+May 29 19:34:56.576: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:34:56.749: INFO: namespace: e2e-tests-emptydir-blvd5, resource: bindings, ignored listing per whitelist
+May 29 19:34:56.864: INFO: namespace e2e-tests-emptydir-blvd5 deletion completed in 6.316987561s
+
+• [SLOW TEST:10.724 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0666,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:34:56.865: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-prncd
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-upd-d73d0b8c-8248-11e9-bd6e-667e8fbec69d
+STEP: Creating the pod
+STEP: Updating configmap configmap-test-upd-d73d0b8c-8248-11e9-bd6e-667e8fbec69d
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:36:04.161: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-prncd" for this suite.
+May 29 19:36:26.192: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:36:26.279: INFO: namespace: e2e-tests-configmap-prncd, resource: bindings, ignored listing per whitelist
+May 29 19:36:26.467: INFO: namespace e2e-tests-configmap-prncd deletion completed in 22.297180981s
+
+• [SLOW TEST:89.602 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:36:26.467: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename services
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-services-dzc7c
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:36:26.742: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-dzc7c" for this suite.
+May 29 19:36:32.776: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:36:32.868: INFO: namespace: e2e-tests-services-dzc7c, resource: bindings, ignored listing per whitelist
+May 29 19:36:33.156: INFO: namespace e2e-tests-services-dzc7c deletion completed in 6.40693561s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:6.689 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:36:33.157: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-wlmpd
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:37:33.636: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-wlmpd" for this suite.
+May 29 19:37:57.667: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:37:57.720: INFO: namespace: e2e-tests-container-probe-wlmpd, resource: bindings, ignored listing per whitelist
+May 29 19:37:57.933: INFO: namespace e2e-tests-container-probe-wlmpd deletion completed in 24.287060669s
+
+• [SLOW TEST:84.776 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:37:57.933: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-gqmwm
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with configMap that has name projected-configmap-test-upd-43221465-8249-11e9-bd6e-667e8fbec69d
+STEP: Creating the pod
+STEP: Updating configmap projected-configmap-test-upd-43221465-8249-11e9-bd6e-667e8fbec69d
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:38:02.289: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-gqmwm" for this suite.
+May 29 19:38:28.318: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:38:28.484: INFO: namespace: e2e-tests-projected-gqmwm, resource: bindings, ignored listing per whitelist
+May 29 19:38:28.620: INFO: namespace e2e-tests-projected-gqmwm deletion completed in 26.323352898s
+
+• [SLOW TEST:30.688 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with projected pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:38:28.621: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename subpath
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-subpath-jh8z5
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with projected pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-projected-9vgh
+STEP: Creating a pod to test atomic-volume-subpath
+May 29 19:38:28.972: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-9vgh" in namespace "e2e-tests-subpath-jh8z5" to be "success or failure"
+May 29 19:38:28.978: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Pending", Reason="", readiness=false. Elapsed: 6.05511ms
+May 29 19:38:30.986: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013876892s
+May 29 19:38:32.994: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 4.021896355s
+May 29 19:38:35.002: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 6.029907904s
+May 29 19:38:37.010: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 8.03788691s
+May 29 19:38:39.029: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 10.056828791s
+May 29 19:38:41.036: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 12.063885825s
+May 29 19:38:43.044: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 14.072182307s
+May 29 19:38:45.052: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 16.080204245s
+May 29 19:38:47.060: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 18.087885842s
+May 29 19:38:49.075: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 20.103267381s
+May 29 19:38:51.083: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Running", Reason="", readiness=false. Elapsed: 22.110427558s
+May 29 19:38:53.092: INFO: Pod "pod-subpath-test-projected-9vgh": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.120066363s
+STEP: Saw pod success
+May 29 19:38:53.092: INFO: Pod "pod-subpath-test-projected-9vgh" satisfied condition "success or failure"
+May 29 19:38:53.099: INFO: Trying to get logs from node scw-sono13-default-2865dd8133304358ae8da697bb2 pod pod-subpath-test-projected-9vgh container test-container-subpath-projected-9vgh: 
+STEP: delete the pod
+May 29 19:38:53.133: INFO: Waiting for pod pod-subpath-test-projected-9vgh to disappear
+May 29 19:38:53.139: INFO: Pod pod-subpath-test-projected-9vgh no longer exists
+STEP: Deleting pod pod-subpath-test-projected-9vgh
+May 29 19:38:53.139: INFO: Deleting pod "pod-subpath-test-projected-9vgh" in namespace "e2e-tests-subpath-jh8z5"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:38:53.159: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-jh8z5" for this suite.
+May 29 19:38:59.199: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:38:59.261: INFO: namespace: e2e-tests-subpath-jh8z5, resource: bindings, ignored listing per whitelist
+May 29 19:38:59.430: INFO: namespace e2e-tests-subpath-jh8z5 deletion completed in 6.261319945s
+
+• [SLOW TEST:30.809 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with projected pod [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:38:59.431: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename var-expansion
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-var-expansion-cskxn
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test env composition
+May 29 19:38:59.717: INFO: Waiting up to 5m0s for pod "var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-var-expansion-cskxn" to be "success or failure"
+May 29 19:38:59.722: INFO: Pod "var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.213579ms
+May 29 19:39:01.730: INFO: Pod "var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012731728s
+May 29 19:39:03.737: INFO: Pod "var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020375985s
+STEP: Saw pod success
+May 29 19:39:03.738: INFO: Pod "var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:39:03.744: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d container dapi-container: 
+STEP: delete the pod
+May 29 19:39:03.774: INFO: Waiting for pod var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:39:03.781: INFO: Pod var-expansion-67ca7d77-8249-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:39:03.781: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-cskxn" for this suite.
+May 29 19:39:09.817: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:39:09.975: INFO: namespace: e2e-tests-var-expansion-cskxn, resource: bindings, ignored listing per whitelist
+May 29 19:39:10.138: INFO: namespace e2e-tests-var-expansion-cskxn deletion completed in 6.34931226s
+
+• [SLOW TEST:10.707 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:39:10.139: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-probe
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-probe-68qcn
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-68qcn
+May 29 19:39:14.430: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-68qcn
+STEP: checking the pod's current state and verifying that restartCount is present
+May 29 19:39:14.441: INFO: Initial restart count of pod liveness-http is 0
+May 29 19:39:24.494: INFO: Restart count of pod e2e-tests-container-probe-68qcn/liveness-http is now 1 (10.053262506s elapsed)
+May 29 19:39:44.588: INFO: Restart count of pod e2e-tests-container-probe-68qcn/liveness-http is now 2 (30.147471644s elapsed)
+May 29 19:40:04.680: INFO: Restart count of pod e2e-tests-container-probe-68qcn/liveness-http is now 3 (50.239379216s elapsed)
+May 29 19:40:24.769: INFO: Restart count of pod e2e-tests-container-probe-68qcn/liveness-http is now 4 (1m10.327874156s elapsed)
+May 29 19:41:25.066: INFO: Restart count of pod e2e-tests-container-probe-68qcn/liveness-http is now 5 (2m10.625275128s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:41:25.085: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-68qcn" for this suite.
+May 29 19:41:31.125: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:41:31.469: INFO: namespace: e2e-tests-container-probe-68qcn, resource: bindings, ignored listing per whitelist
+May 29 19:41:31.474: INFO: namespace e2e-tests-container-probe-68qcn deletion completed in 6.380647803s
+
+• [SLOW TEST:141.336 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:41:31.475: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename sched-pred
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-sched-pred-xpgh4
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+May 29 19:41:31.832: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+May 29 19:41:31.856: INFO: Waiting for terminating namespaces to be deleted...
+May 29 19:41:31.863: INFO: 
+Logging pods the kubelet thinks is on node scw-sono13-default-2865dd8133304358ae8da697bb2 before test
+May 29 19:41:31.876: INFO: node-problem-detector-6bkln from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container node-problem-detector ready: true, restart count 0
+May 29 19:41:31.876: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-krxc2 from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+May 29 19:41:31.876: INFO: 	Container systemd-logs ready: true, restart count 1
+May 29 19:41:31.876: INFO: monitoring-influxdb-7c84bfcfc8-snwmn from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container influxdb ready: true, restart count 0
+May 29 19:41:31.876: INFO: metrics-server-794596bd9d-x9dz9 from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container metrics-server ready: true, restart count 0
+May 29 19:41:31.876: INFO: kubernetes-dashboard-794fb6974c-d7btd from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+May 29 19:41:31.876: INFO: kube-proxy-s4qs6 from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container kube-proxy ready: true, restart count 0
+May 29 19:41:31.876: INFO: coredns-59b5b6c955-ssfxs from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container coredns ready: true, restart count 0
+May 29 19:41:31.876: INFO: flannel-nnv2c from kube-system started at 2019-05-29 18:14:10 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container kube-flannel ready: true, restart count 0
+May 29 19:41:31.876: INFO: heapster-d8d4579b6-fzrlt from kube-system started at 2019-05-29 18:14:30 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.876: INFO: 	Container heapster ready: true, restart count 0
+May 29 19:41:31.876: INFO: 
+Logging pods the kubelet thinks is on node scw-sono13-default-71171af685174eada6c25c1541e before test
+May 29 19:41:31.893: INFO: sonobuoy-e2e-job-721690eaa8df4a4c from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container e2e ready: true, restart count 0
+May 29 19:41:31.893: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+May 29 19:41:31.893: INFO: node-problem-detector-lbd8v from kube-system started at 2019-05-29 18:14:33 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container node-problem-detector ready: true, restart count 0
+May 29 19:41:31.893: INFO: kube-proxy-7jxzv from kube-system started at 2019-05-29 18:14:13 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container kube-proxy ready: true, restart count 0
+May 29 19:41:31.893: INFO: flannel-8bs82 from kube-system started at 2019-05-29 18:14:14 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container kube-flannel ready: true, restart count 0
+May 29 19:41:31.893: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-29 18:15:07 +0000 UTC (1 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+May 29 19:41:31.893: INFO: sonobuoy-systemd-logs-daemon-set-537397329e444263-ct67t from heptio-sonobuoy started at 2019-05-29 18:15:12 +0000 UTC (2 container statuses recorded)
+May 29 19:41:31.893: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+May 29 19:41:31.893: INFO: 	Container systemd-logs ready: true, restart count 1
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: verifying the node has the label node scw-sono13-default-2865dd8133304358ae8da697bb2
+STEP: verifying the node has the label node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.023: INFO: Pod sonobuoy requesting resource cpu=0m on Node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.024: INFO: Pod sonobuoy-e2e-job-721690eaa8df4a4c requesting resource cpu=0m on Node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.024: INFO: Pod sonobuoy-systemd-logs-daemon-set-537397329e444263-ct67t requesting resource cpu=0m on Node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.024: INFO: Pod sonobuoy-systemd-logs-daemon-set-537397329e444263-krxc2 requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod coredns-59b5b6c955-ssfxs requesting resource cpu=100m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod flannel-8bs82 requesting resource cpu=100m on Node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.024: INFO: Pod flannel-nnv2c requesting resource cpu=100m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod heapster-d8d4579b6-fzrlt requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod kube-proxy-7jxzv requesting resource cpu=0m on Node scw-sono13-default-71171af685174eada6c25c1541e
+May 29 19:41:32.024: INFO: Pod kube-proxy-s4qs6 requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod kubernetes-dashboard-794fb6974c-d7btd requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod metrics-server-794596bd9d-x9dz9 requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod monitoring-influxdb-7c84bfcfc8-snwmn requesting resource cpu=0m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod node-problem-detector-6bkln requesting resource cpu=20m on Node scw-sono13-default-2865dd8133304358ae8da697bb2
+May 29 19:41:32.024: INFO: Pod node-problem-detector-lbd8v requesting resource cpu=20m on Node scw-sono13-default-71171af685174eada6c25c1541e
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2946e88-8249-11e9-bd6e-667e8fbec69d.15a33ec0648ccd57], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-xpgh4/filler-pod-c2946e88-8249-11e9-bd6e-667e8fbec69d to scw-sono13-default-2865dd8133304358ae8da697bb2]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2946e88-8249-11e9-bd6e-667e8fbec69d.15a33ec0a8d6c99d], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2946e88-8249-11e9-bd6e-667e8fbec69d.15a33ec0b0044183], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2946e88-8249-11e9-bd6e-667e8fbec69d.15a33ec0bd7a66bc], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2961a34-8249-11e9-bd6e-667e8fbec69d.15a33ec064ec9e70], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-xpgh4/filler-pod-c2961a34-8249-11e9-bd6e-667e8fbec69d to scw-sono13-default-71171af685174eada6c25c1541e]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2961a34-8249-11e9-bd6e-667e8fbec69d.15a33ec0afbd5033], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2961a34-8249-11e9-bd6e-667e8fbec69d.15a33ec0b5ea30a4], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-c2961a34-8249-11e9-bd6e-667e8fbec69d.15a33ec0c422be7c], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.15a33ec170009a8d], Reason = [FailedScheduling], Message = [0/2 nodes are available: 2 Insufficient cpu.]
+STEP: removing the label node off the node scw-sono13-default-2865dd8133304358ae8da697bb2
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node scw-sono13-default-71171af685174eada6c25c1541e
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:41:37.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-xpgh4" for this suite.
+May 29 19:41:43.622: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:41:43.703: INFO: namespace: e2e-tests-sched-pred-xpgh4, resource: bindings, ignored listing per whitelist
+May 29 19:41:43.890: INFO: namespace e2e-tests-sched-pred-xpgh4 deletion completed in 6.290044579s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:12.416 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:41:43.891: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename downward-api
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-downward-api-fkpd7
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May 29 19:41:44.185: INFO: Waiting up to 5m0s for pod "downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-downward-api-fkpd7" to be "success or failure"
+May 29 19:41:44.192: INFO: Pod "downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.280633ms
+May 29 19:41:46.198: INFO: Pod "downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012424021s
+May 29 19:41:48.205: INFO: Pod "downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020057681s
+STEP: Saw pod success
+May 29 19:41:48.206: INFO: Pod "downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:41:48.211: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d container dapi-container: 
+STEP: delete the pod
+May 29 19:41:48.239: INFO: Waiting for pod downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:41:48.247: INFO: Pod downward-api-c9d2486a-8249-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:41:48.247: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-fkpd7" for this suite.
+May 29 19:41:54.276: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:41:54.546: INFO: namespace: e2e-tests-downward-api-fkpd7, resource: bindings, ignored listing per whitelist
+May 29 19:41:54.570: INFO: namespace e2e-tests-downward-api-fkpd7 deletion completed in 6.31506398s
+
+• [SLOW TEST:10.678 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:41:54.570: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename replicaset
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-replicaset-kbspc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a Pod with a 'name' label pod-adoption-release is created
+STEP: When a replicaset with a matching selector is created
+STEP: Then the orphan pod is adopted
+STEP: When the matched label of one of its pods change
+May 29 19:42:01.896: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:42:01.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replicaset-kbspc" for this suite.
+May 29 19:42:23.946: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:42:24.151: INFO: namespace: e2e-tests-replicaset-kbspc, resource: bindings, ignored listing per whitelist
+May 29 19:42:24.158: INFO: namespace e2e-tests-replicaset-kbspc deletion completed in 22.232518166s
+
+• [SLOW TEST:29.588 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: http [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:42:24.158: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pod-network-test-x2c57
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: http [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-x2c57
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+May 29 19:42:24.434: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+May 29 19:42:48.650: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://100.64.0.110:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-x2c57 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:42:48.651: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:42:48.814: INFO: Found all expected endpoints: [netserver-0]
+May 29 19:42:48.821: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://100.64.1.214:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-x2c57 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May 29 19:42:48.821: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+May 29 19:42:48.961: INFO: Found all expected endpoints: [netserver-1]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:42:48.961: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pod-network-test-x2c57" for this suite.
+May 29 19:43:11.000: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:43:11.193: INFO: namespace: e2e-tests-pod-network-test-x2c57, resource: bindings, ignored listing per whitelist
+May 29 19:43:11.256: INFO: namespace e2e-tests-pod-network-test-x2c57 deletion completed in 22.284872745s
+
+• [SLOW TEST:47.098 seconds]
+[sig-network] Networking
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: http [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:43:11.258: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-tr9js
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:43:11.528: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:43:15.725: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-tr9js" for this suite.
+May 29 19:44:07.756: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:44:07.886: INFO: namespace: e2e-tests-pods-tr9js, resource: bindings, ignored listing per whitelist
+May 29 19:44:08.073: INFO: namespace e2e-tests-pods-tr9js deletion completed in 52.338966691s
+
+• [SLOW TEST:56.815 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:44:08.074: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-4lwt4
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-map-1fc75751-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:44:08.403: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-4lwt4" to be "success or failure"
+May 29 19:44:08.409: INFO: Pod "pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 5.654507ms
+May 29 19:44:10.417: INFO: Pod "pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013068608s
+May 29 19:44:12.434: INFO: Pod "pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030227335s
+STEP: Saw pod success
+May 29 19:44:12.434: INFO: Pod "pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:44:12.517: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d container projected-secret-volume-test: 
+STEP: delete the pod
+May 29 19:44:12.550: INFO: Waiting for pod pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:44:12.555: INFO: Pod pod-projected-secrets-1fc8598e-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:44:12.556: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-4lwt4" for this suite.
+May 29 19:44:18.593: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:44:18.674: INFO: namespace: e2e-tests-projected-4lwt4, resource: bindings, ignored listing per whitelist
+May 29 19:44:18.872: INFO: namespace e2e-tests-projected-4lwt4 deletion completed in 6.307619343s
+
+• [SLOW TEST:10.798 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Projected configMap 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:44:18.872: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-8rt4m
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name cm-test-opt-del-2633caa9-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating configMap with name cm-test-opt-upd-2633caf8-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-2633caa9-824a-11e9-bd6e-667e8fbec69d
+STEP: Updating configmap cm-test-opt-upd-2633caf8-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating configMap with name cm-test-opt-create-2633cb16-824a-11e9-bd6e-667e8fbec69d
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:44:25.360: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-8rt4m" for this suite.
+May 29 19:44:47.422: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:44:47.671: INFO: namespace: e2e-tests-projected-8rt4m, resource: bindings, ignored listing per whitelist
+May 29 19:44:47.715: INFO: namespace e2e-tests-projected-8rt4m deletion completed in 22.345640443s
+
+• [SLOW TEST:28.843 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:44:47.716: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-stb92
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-map-376294ae-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:44:48.009: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-stb92" to be "success or failure"
+May 29 19:44:48.015: INFO: Pod "pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.047556ms
+May 29 19:44:50.021: INFO: Pod "pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012636617s
+May 29 19:44:52.037: INFO: Pod "pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027989087s
+STEP: Saw pod success
+May 29 19:44:52.037: INFO: Pod "pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:44:52.043: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d container projected-configmap-volume-test: 
+STEP: delete the pod
+May 29 19:44:52.070: INFO: Waiting for pod pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:44:52.076: INFO: Pod pod-projected-configmaps-37638763-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:44:52.076: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-stb92" for this suite.
+May 29 19:44:58.106: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:44:58.219: INFO: namespace: e2e-tests-projected-stb92, resource: bindings, ignored listing per whitelist
+May 29 19:44:58.354: INFO: namespace e2e-tests-projected-stb92 deletion completed in 6.270722215s
+
+• [SLOW TEST:10.639 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl label 
+  should update the label on a resource  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:44:58.356: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-bpjlb
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1052
+STEP: creating the pod
+May 29 19:44:58.613: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:44:59.495: INFO: stderr: ""
+May 29 19:44:59.495: INFO: stdout: "pod/pause created\n"
+May 29 19:44:59.495: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause]
+May 29 19:44:59.495: INFO: Waiting up to 5m0s for pod "pause" in namespace "e2e-tests-kubectl-bpjlb" to be "running and ready"
+May 29 19:44:59.501: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 6.109292ms
+May 29 19:45:01.508: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012841691s
+May 29 19:45:03.524: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 4.029116073s
+May 29 19:45:03.525: INFO: Pod "pause" satisfied condition "running and ready"
+May 29 19:45:03.525: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause]
+[It] should update the label on a resource  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: adding the label testing-label with value testing-label-value to a pod
+May 29 19:45:03.525: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 label pods pause testing-label=testing-label-value --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:03.662: INFO: stderr: ""
+May 29 19:45:03.662: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod has the label testing-label with the value testing-label-value
+May 29 19:45:03.662: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pod pause -L testing-label --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:03.797: INFO: stderr: ""
+May 29 19:45:03.797: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          4s    testing-label-value\n"
+STEP: removing the label testing-label of a pod
+May 29 19:45:03.797: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 label pods pause testing-label- --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:03.946: INFO: stderr: ""
+May 29 19:45:03.946: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod doesn't have the label testing-label
+May 29 19:45:03.946: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pod pause -L testing-label --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:04.076: INFO: stderr: ""
+May 29 19:45:04.076: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          5s    \n"
+[AfterEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1059
+STEP: using delete to clean up resources
+May 29 19:45:04.076: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:04.207: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May 29 19:45:04.207: INFO: stdout: "pod \"pause\" force deleted\n"
+May 29 19:45:04.207: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get rc,svc -l name=pause --no-headers --namespace=e2e-tests-kubectl-bpjlb'
+May 29 19:45:04.350: INFO: stderr: "No resources found.\n"
+May 29 19:45:04.350: INFO: stdout: ""
+May 29 19:45:04.350: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 get pods -l name=pause --namespace=e2e-tests-kubectl-bpjlb -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+May 29 19:45:04.508: INFO: stderr: ""
+May 29 19:45:04.508: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:45:04.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-bpjlb" for this suite.
+May 29 19:45:10.538: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:45:10.663: INFO: namespace: e2e-tests-kubectl-bpjlb, resource: bindings, ignored listing per whitelist
+May 29 19:45:10.835: INFO: namespace e2e-tests-kubectl-bpjlb deletion completed in 6.317200716s
+
+• [SLOW TEST:12.479 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl label
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should update the label on a resource  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl patch 
+  should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:45:10.836: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename kubectl
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-kubectl-knzqh
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating Redis RC
+May 29 19:45:11.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 create -f - --namespace=e2e-tests-kubectl-knzqh'
+May 29 19:45:11.436: INFO: stderr: ""
+May 29 19:45:11.436: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+May 29 19:45:12.522: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:45:12.522: INFO: Found 0 / 1
+May 29 19:45:13.444: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:45:13.444: INFO: Found 0 / 1
+May 29 19:45:14.451: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:45:14.451: INFO: Found 1 / 1
+May 29 19:45:14.451: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+STEP: patching all pods
+May 29 19:45:14.458: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:45:14.458: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+May 29 19:45:14.458: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-329215334 patch pod redis-master-vsw7l --namespace=e2e-tests-kubectl-knzqh -p {"metadata":{"annotations":{"x":"y"}}}'
+May 29 19:45:14.595: INFO: stderr: ""
+May 29 19:45:14.595: INFO: stdout: "pod/redis-master-vsw7l patched\n"
+STEP: checking annotations
+May 29 19:45:14.602: INFO: Selector matched 1 pods for map[app:redis]
+May 29 19:45:14.602: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:45:14.602: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-knzqh" for this suite.
+May 29 19:45:36.648: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:45:36.891: INFO: namespace: e2e-tests-kubectl-knzqh, resource: bindings, ignored listing per whitelist
+May 29 19:45:36.908: INFO: namespace e2e-tests-kubectl-knzqh deletion completed in 22.295878324s
+
+• [SLOW TEST:26.072 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl patch
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should add annotations for pods in rc  [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Projected secret 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:45:36.908: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-r2jlh
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name s-test-opt-del-54b5bcce-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating secret with name s-test-opt-upd-54b5bd19-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-54b5bcce-824a-11e9-bd6e-667e8fbec69d
+STEP: Updating secret s-test-opt-upd-54b5bd19-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating secret with name s-test-opt-create-54b5bd3b-824a-11e9-bd6e-667e8fbec69d
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:47:00.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-r2jlh" for this suite.
+May 29 19:47:22.242: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:47:22.416: INFO: namespace: e2e-tests-projected-r2jlh, resource: bindings, ignored listing per whitelist
+May 29 19:47:22.518: INFO: namespace e2e-tests-projected-r2jlh deletion completed in 22.296465802s
+
+• [SLOW TEST:105.610 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:47:22.518: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-sr5hj
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May 29 19:47:22.797: INFO: Waiting up to 5m0s for pod "downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-sr5hj" to be "success or failure"
+May 29 19:47:22.804: INFO: Pod "downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.159867ms
+May 29 19:47:24.811: INFO: Pod "downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014679652s
+May 29 19:47:26.819: INFO: Pod "downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021985135s
+STEP: Saw pod success
+May 29 19:47:26.819: INFO: Pod "downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:47:26.826: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d container client-container: 
+STEP: delete the pod
+May 29 19:47:26.859: INFO: Waiting for pod downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:47:26.865: INFO: Pod downwardapi-volume-93a64bda-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:47:26.865: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-sr5hj" for this suite.
+May 29 19:47:32.897: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:47:32.979: INFO: namespace: e2e-tests-projected-sr5hj, resource: bindings, ignored listing per whitelist
+May 29 19:47:33.146: INFO: namespace e2e-tests-projected-sr5hj deletion completed in 6.272276544s
+
+• [SLOW TEST:10.628 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Pods 
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:47:33.147: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename pods
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-pods-4vznk
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May 29 19:47:33.443: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:47:37.529: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-4vznk" for this suite.
+May 29 19:48:29.569: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:48:29.682: INFO: namespace: e2e-tests-pods-4vznk, resource: bindings, ignored listing per whitelist
+May 29 19:48:29.821: INFO: namespace e2e-tests-pods-4vznk deletion completed in 52.282581708s
+
+• [SLOW TEST:56.674 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:48:29.821: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-container-lifecycle-hook-srs6f
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+May 29 19:48:38.167: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:38.174: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:40.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:40.191: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:42.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:42.182: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:44.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:44.182: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:46.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:46.184: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:48.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:48.183: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:50.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:50.182: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:52.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:52.190: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:54.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:54.183: INFO: Pod pod-with-poststart-exec-hook still exists
+May 29 19:48:56.175: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+May 29 19:48:56.182: INFO: Pod pod-with-poststart-exec-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:48:56.183: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-srs6f" for this suite.
+May 29 19:49:18.214: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:49:18.328: INFO: namespace: e2e-tests-container-lifecycle-hook-srs6f, resource: bindings, ignored listing per whitelist
+May 29 19:49:18.453: INFO: namespace e2e-tests-container-lifecycle-hook-srs6f deletion completed in 22.262154964s
+
+• [SLOW TEST:48.632 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute poststart exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:49:18.454: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename emptydir
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-emptydir-hdpx8
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+May 29 19:49:18.772: INFO: Waiting up to 5m0s for pod "pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-emptydir-hdpx8" to be "success or failure"
+May 29 19:49:18.778: INFO: Pod "pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.475821ms
+May 29 19:49:20.786: INFO: Pod "pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014459596s
+May 29 19:49:22.794: INFO: Pod "pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021684776s
+STEP: Saw pod success
+May 29 19:49:22.794: INFO: Pod "pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:49:22.801: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d container test-container: 
+STEP: delete the pod
+May 29 19:49:22.828: INFO: Waiting for pod pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:49:22.833: INFO: Pod pod-d8c6bb7f-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:49:22.833: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-hdpx8" for this suite.
+May 29 19:49:28.868: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:49:29.121: INFO: namespace: e2e-tests-emptydir-hdpx8, resource: bindings, ignored listing per whitelist
+May 29 19:49:29.139: INFO: namespace e2e-tests-emptydir-hdpx8 deletion completed in 6.291450205s
+
+• [SLOW TEST:10.685 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0777,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:49:29.140: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-9bvth
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-df26c23f-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:49:29.475: INFO: Waiting up to 5m0s for pod "pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-9bvth" to be "success or failure"
+May 29 19:49:29.482: INFO: Pod "pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 7.433512ms
+May 29 19:49:31.489: INFO: Pod "pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014156248s
+May 29 19:49:33.496: INFO: Pod "pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021809462s
+STEP: Saw pod success
+May 29 19:49:33.497: INFO: Pod "pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:49:33.518: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d container secret-volume-test: 
+STEP: delete the pod
+May 29 19:49:33.547: INFO: Waiting for pod pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:49:33.552: INFO: Pod pod-secrets-df27de2e-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:49:33.552: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-9bvth" for this suite.
+May 29 19:49:39.584: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:49:39.777: INFO: namespace: e2e-tests-secrets-9bvth, resource: bindings, ignored listing per whitelist
+May 29 19:49:39.878: INFO: namespace e2e-tests-secrets-9bvth deletion completed in 6.317795227s
+
+• [SLOW TEST:10.738 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:49:39.879: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename projected
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-projected-46dhc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-map-e586349f-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:49:40.174: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-projected-46dhc" to be "success or failure"
+May 29 19:49:40.181: INFO: Pod "pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.96497ms
+May 29 19:49:42.188: INFO: Pod "pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013684059s
+May 29 19:49:44.195: INFO: Pod "pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021268223s
+STEP: Saw pod success
+May 29 19:49:44.195: INFO: Pod "pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:49:44.201: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d container projected-secret-volume-test: 
+STEP: delete the pod
+May 29 19:49:44.229: INFO: Waiting for pod pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:49:44.236: INFO: Pod pod-projected-secrets-e588439f-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:49:44.236: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-46dhc" for this suite.
+May 29 19:49:50.270: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:49:50.456: INFO: namespace: e2e-tests-projected-46dhc, resource: bindings, ignored listing per whitelist
+May 29 19:49:50.514: INFO: namespace e2e-tests-projected-46dhc deletion completed in 6.270614434s
+
+• [SLOW TEST:10.636 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:49:50.515: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename configmap
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-configmap-46tx6
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-map-ebdbd774-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume configMaps
+May 29 19:49:50.794: INFO: Waiting up to 5m0s for pod "pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-configmap-46tx6" to be "success or failure"
+May 29 19:49:50.798: INFO: Pod "pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 4.523071ms
+May 29 19:49:52.805: INFO: Pod "pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011424231s
+May 29 19:49:54.813: INFO: Pod "pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019592022s
+STEP: Saw pod success
+May 29 19:49:54.814: INFO: Pod "pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:49:54.820: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d container configmap-volume-test: 
+STEP: delete the pod
+May 29 19:49:54.848: INFO: Waiting for pod pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:49:54.855: INFO: Pod pod-configmaps-ebdce503-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:49:54.855: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-46tx6" for this suite.
+May 29 19:50:00.887: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:50:01.035: INFO: namespace: e2e-tests-configmap-46tx6, resource: bindings, ignored listing per whitelist
+May 29 19:50:01.160: INFO: namespace e2e-tests-configmap-46tx6 deletion completed in 6.297705038s
+
+• [SLOW TEST:10.645 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:50:01.162: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-svcaccounts-rmkw9
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: getting the auto-created API token
+May 29 19:50:02.006: INFO: created pod pod-service-account-defaultsa
+May 29 19:50:02.006: INFO: pod pod-service-account-defaultsa service account token volume mount: true
+May 29 19:50:02.014: INFO: created pod pod-service-account-mountsa
+May 29 19:50:02.014: INFO: pod pod-service-account-mountsa service account token volume mount: true
+May 29 19:50:02.021: INFO: created pod pod-service-account-nomountsa
+May 29 19:50:02.021: INFO: pod pod-service-account-nomountsa service account token volume mount: false
+May 29 19:50:02.029: INFO: created pod pod-service-account-defaultsa-mountspec
+May 29 19:50:02.029: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true
+May 29 19:50:02.035: INFO: created pod pod-service-account-mountsa-mountspec
+May 29 19:50:02.035: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true
+May 29 19:50:02.042: INFO: created pod pod-service-account-nomountsa-mountspec
+May 29 19:50:02.042: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true
+May 29 19:50:02.049: INFO: created pod pod-service-account-defaultsa-nomountspec
+May 29 19:50:02.049: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false
+May 29 19:50:02.059: INFO: created pod pod-service-account-mountsa-nomountspec
+May 29 19:50:02.059: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false
+May 29 19:50:02.070: INFO: created pod pod-service-account-nomountsa-nomountspec
+May 29 19:50:02.070: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:50:02.070: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-svcaccounts-rmkw9" for this suite.
+May 29 19:50:08.159: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:50:08.372: INFO: namespace: e2e-tests-svcaccounts-rmkw9, resource: bindings, ignored listing per whitelist
+May 29 19:50:08.474: INFO: namespace e2e-tests-svcaccounts-rmkw9 deletion completed in 6.348244204s
+
+• [SLOW TEST:7.312 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should allow opting out of API token automount  [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:50:08.476: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename init-container
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-init-container-4fpsz
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May 29 19:50:08.838: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:50:12.940: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-4fpsz" for this suite.
+May 29 19:50:18.983: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:50:19.087: INFO: namespace: e2e-tests-init-container-4fpsz, resource: bindings, ignored listing per whitelist
+May 29 19:50:19.263: INFO: namespace e2e-tests-init-container-4fpsz deletion completed in 6.312299527s
+
+• [SLOW TEST:10.787 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May 29 19:50:19.263: INFO: >>> kubeConfig: /tmp/kubeconfig-329215334
+STEP: Building a namespace api object, basename secrets
+STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in e2e-tests-secrets-lqbl4
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-fd0a4e29-824a-11e9-bd6e-667e8fbec69d
+STEP: Creating a pod to test consume secrets
+May 29 19:50:19.618: INFO: Waiting up to 5m0s for pod "pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d" in namespace "e2e-tests-secrets-lqbl4" to be "success or failure"
+May 29 19:50:19.625: INFO: Pod "pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d": Phase="Pending", Reason="", readiness=false. Elapsed: 6.719289ms
+May 29 19:50:21.632: INFO: Pod "pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013507747s
+STEP: Saw pod success
+May 29 19:50:21.632: INFO: Pod "pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d" satisfied condition "success or failure"
+May 29 19:50:21.638: INFO: Trying to get logs from node scw-sono13-default-71171af685174eada6c25c1541e pod pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d container secret-volume-test: 
+STEP: delete the pod
+May 29 19:50:21.664: INFO: Waiting for pod pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d to disappear
+May 29 19:50:21.670: INFO: Pod pod-secrets-fd0b4a88-824a-11e9-bd6e-667e8fbec69d no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May 29 19:50:21.670: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-lqbl4" for this suite.
+May 29 19:50:27.697: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May 29 19:50:27.913: INFO: namespace: e2e-tests-secrets-lqbl4, resource: bindings, ignored listing per whitelist
+May 29 19:50:27.959: INFO: namespace e2e-tests-secrets-lqbl4 deletion completed in 6.282614394s
+
+• [SLOW TEST:8.696 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.0-rc.2.1+ddf47ac13c1a94/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSMay 29 19:50:27.960: INFO: Running AfterSuite actions on all nodes
+May 29 19:50:27.960: INFO: Running AfterSuite actions on node 1
+May 29 19:50:27.960: INFO: Skipping dumping logs from cluster
+
+Ran 200 of 1946 Specs in 5698.021 seconds
+SUCCESS! -- 200 Passed | 0 Failed | 0 Pending | 1746 Skipped PASS
+
+Ginkgo ran 1 suite in 1h34m58.928876726s
+Test Suite Passed
diff --git a/v1.13/scaleway/junit_01.xml b/v1.13/scaleway/junit_01.xml
new file mode 100644
index 0000000000..06a5c16941
--- /dev/null
+++ b/v1.13/scaleway/junit_01.xml
@@ -0,0 +1,5441 @@