diff --git a/v1.14/kubermatic/PRODUCT.yaml b/v1.14/kubermatic/PRODUCT.yaml new file mode 100644 index 0000000000..d925cb8600 --- /dev/null +++ b/v1.14/kubermatic/PRODUCT.yaml @@ -0,0 +1,7 @@ +vendor: Loodse +name: Kubermatic Container Engine +version: v2.10 +website_url: https://loodse.com +documentation_url: https://docs.kubermatic.io/ +type: Distribution +product_logo_url: https://drive.google.com/file/d/1NdEwTa_Aco_urdyY7bhDjJEPcpyAYRY9 diff --git a/v1.14/kubermatic/README.md b/v1.14/kubermatic/README.md new file mode 100644 index 0000000000..bd627b4f41 --- /dev/null +++ b/v1.14/kubermatic/README.md @@ -0,0 +1,52 @@ +# To reproduce + +## Set up the cluster + +1. Log in to https://cloud.kubermatic.io/ +2. Press the "Create Cluster" button +3. Pick Kubernetes version v1.14.x +4. Complete the cluster creation wizard, selecting AWS as the provider. + +Once the cluster is up and running: + +1. Download the kubeconfig file. +2. Set the `KUBECONFIG` environment variable: `export KUBECONFIG=$PWD/kubeconfig`. + +## Run the conformance test + +Download a [binary release](https://github.com/heptio/sonobuoy/releases) of the CLI, or build it yourself by running: + +``` +$ go get -u -v github.com/heptio/sonobuoy +``` + +Deploy a Sonobuoy pod to your cluster with: + +``` +$ sonobuoy run +``` + +Check the status of the test run: + +``` +$ sonobuoy status +``` + +To inspect the logs: + +``` +$ sonobuoy logs +``` + +Once `sonobuoy status` shows the run as `completed`, retrieve the results from the main Sonobuoy pod: + +``` +$ outfile=$(sonobuoy retrieve) +``` + +This copies a single `.tar.gz` snapshot from the Sonobuoy pod into your +current working directory. Extract the contents into `./results` with: + +``` +$ mkdir ./results; tar xzf $outfile -C ./results +``` \ No newline at end of file diff --git a/v1.14/kubermatic/e2e.log b/v1.14/kubermatic/e2e.log new file mode 100644 index 0000000000..fd636a91b9 --- /dev/null +++ b/v1.14/kubermatic/e2e.log @@ -0,0 +1,10956 @@ +I0604 15:54:28.449927 15 test_context.go:405] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-441229521 +I0604 15:54:28.450159 15 e2e.go:240] Starting e2e run "081e846b-86e1-11e9-a2b6-96b18e3e6fac" on Ginkgo node 1 +Running Suite: Kubernetes e2e suite +=================================== +Random Seed: 1559663667 - Will randomize all specs +Will run 204 of 3584 specs + +Jun 4 15:54:28.594: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +Jun 4 15:54:28.596: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable +Jun 4 15:54:28.624: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready +Jun 4 15:54:28.663: INFO: 16 / 16 pods in namespace 'kube-system' are running and ready (0 seconds elapsed) +Jun 4 15:54:28.663: INFO: expected 4 pod replicas in namespace 'kube-system', 4 are Running and Ready. 
+Jun 4 15:54:28.663: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +Jun 4 15:54:28.674: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'canal' (0 seconds elapsed) +Jun 4 15:54:28.674: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'kube-proxy' (0 seconds elapsed) +Jun 4 15:54:28.674: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'node-exporter' (0 seconds elapsed) +Jun 4 15:54:28.674: INFO: 3 / 3 pods ready in namespace 'kube-system' in daemonset 'node-local-dns' (0 seconds elapsed) +Jun 4 15:54:28.674: INFO: e2e test version: v1.14.1 +Jun 4 15:54:28.677: INFO: kube-apiserver version: v1.14.1 +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:54:28.677: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename secrets +Jun 4 15:54:28.813: INFO: No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled. +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-08fefa20-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 15:54:28.842: INFO: Waiting up to 5m0s for pod "pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac" in namespace "secrets-2189" to be "success or failure" +Jun 4 15:54:28.850: INFO: Pod "pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.156959ms +Jun 4 15:54:30.858: INFO: Pod "pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015854887s +Jun 4 15:54:32.863: INFO: Pod "pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02110302s +STEP: Saw pod success +Jun 4 15:54:32.863: INFO: Pod "pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:54:32.867: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac container secret-volume-test: +STEP: delete the pod +Jun 4 15:54:32.959: INFO: Waiting for pod pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:54:32.963: INFO: Pod pod-secrets-0900402b-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:54:32.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-2189" for this suite. 
+Jun 4 15:54:38.991: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:54:39.531: INFO: namespace secrets-2189 deletion completed in 6.561093047s + +• [SLOW TEST:10.853 seconds] +[sig-storage] Secrets +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl logs + should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:54:39.531: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl logs + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1190 +STEP: creating an rc +Jun 4 15:54:39.577: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-6084' +Jun 4 15:54:40.903: INFO: stderr: "" +Jun 4 15:54:40.903: INFO: stdout: "replicationcontroller/redis-master created\n" +[It] should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Waiting for Redis master to start. +Jun 4 15:54:41.909: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:41.909: INFO: Found 0 / 1 +Jun 4 15:54:42.909: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:42.909: INFO: Found 0 / 1 +Jun 4 15:54:43.924: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:43.924: INFO: Found 0 / 1 +Jun 4 15:54:44.909: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:44.909: INFO: Found 0 / 1 +Jun 4 15:54:45.909: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:45.909: INFO: Found 1 / 1 +Jun 4 15:54:45.909: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +Jun 4 15:54:45.915: INFO: Selector matched 1 pods for map[app:redis] +Jun 4 15:54:45.915: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +STEP: checking for a matching strings +Jun 4 15:54:45.915: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 logs redis-master-j9lhq redis-master --namespace=kubectl-6084' +Jun 4 15:54:46.106: INFO: stderr: "" +Jun 4 15:54:46.106: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. 
```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 04 Jun 15:54:43.949 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 04 Jun 15:54:43.950 # Server started, Redis version 3.2.12\n1:M 04 Jun 15:54:43.950 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 04 Jun 15:54:43.950 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log lines +Jun 4 15:54:46.106: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 log redis-master-j9lhq redis-master --namespace=kubectl-6084 --tail=1' +Jun 4 15:54:46.321: INFO: stderr: "" +Jun 4 15:54:46.321: INFO: stdout: "1:M 04 Jun 15:54:43.950 * The server is now ready to accept connections on port 6379\n" +STEP: limiting log bytes +Jun 4 15:54:46.321: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 log redis-master-j9lhq redis-master --namespace=kubectl-6084 --limit-bytes=1' +Jun 4 15:54:46.505: INFO: stderr: "" +Jun 4 15:54:46.506: INFO: stdout: " " +STEP: exposing timestamps +Jun 4 15:54:46.506: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 log redis-master-j9lhq redis-master --namespace=kubectl-6084 --tail=1 --timestamps' +Jun 4 15:54:46.612: INFO: stderr: "" +Jun 4 15:54:46.612: INFO: stdout: "2019-06-04T15:54:43.950352353Z 1:M 04 Jun 15:54:43.950 * The server is now ready to accept connections on port 6379\n" +STEP: restricting to a time range +Jun 4 15:54:49.113: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 log redis-master-j9lhq redis-master --namespace=kubectl-6084 --since=1s' +Jun 4 15:54:49.277: INFO: stderr: "" +Jun 4 15:54:49.277: INFO: stdout: "" +Jun 4 15:54:49.277: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 log redis-master-j9lhq redis-master --namespace=kubectl-6084 --since=24h' +Jun 4 15:54:49.621: INFO: stderr: "" +Jun 4 15:54:49.621: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 04 Jun 15:54:43.949 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 04 Jun 15:54:43.950 # Server started, Redis version 3.2.12\n1:M 04 Jun 15:54:43.950 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. 
This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 04 Jun 15:54:43.950 * The server is now ready to accept connections on port 6379\n" +[AfterEach] [k8s.io] Kubectl logs + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1196 +STEP: using delete to clean up resources +Jun 4 15:54:49.621: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-6084' +Jun 4 15:54:49.815: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 15:54:49.815: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n" +Jun 4 15:54:49.815: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=nginx --no-headers --namespace=kubectl-6084' +Jun 4 15:54:49.904: INFO: stderr: "No resources found.\n" +Jun 4 15:54:49.904: INFO: stdout: "" +Jun 4 15:54:49.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=nginx --namespace=kubectl-6084 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +Jun 4 15:54:50.026: INFO: stderr: "" +Jun 4 15:54:50.026: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:54:50.026: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6084" for this suite. 
+Jun 4 15:54:56.066: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:54:56.742: INFO: namespace kubectl-6084 deletion completed in 6.708470008s + +• [SLOW TEST:17.211 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl logs + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should be able to retrieve and filter logs [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] version v1 + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:54:56.742: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename proxy +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy through a service and a pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: starting an echo server on multiple ports +STEP: creating replication controller proxy-service-qtx9k in namespace proxy-8936 +I0604 15:54:56.870060 15 runners.go:184] Created replication controller with name: proxy-service-qtx9k, namespace: proxy-8936, replica count: 1 +I0604 15:54:57.970395 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0604 15:54:58.970552 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0604 15:54:59.970705 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0604 15:55:00.970877 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0604 15:55:01.971080 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:02.971256 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:03.971409 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:04.971574 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:05.972038 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 
1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:06.972206 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:07.972372 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:08.972621 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:09.972775 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:10.972928 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0604 15:55:11.973247 15 runners.go:184] proxy-service-qtx9k Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +Jun 4 15:55:11.981: INFO: setup took 15.15517429s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts +Jun 4 15:55:12.147: INFO: (0) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 165.946557ms) +Jun 4 15:55:12.222: INFO: (0) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 240.712254ms) +Jun 4 15:55:12.305: INFO: (0) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 323.044481ms) +Jun 4 15:55:12.305: INFO: (0) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 323.179368ms) +Jun 4 15:55:12.325: INFO: (0) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 342.70407ms) +Jun 4 15:55:12.325: INFO: (0) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 343.182729ms) +Jun 4 15:55:12.325: INFO: (0) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 342.967248ms) +Jun 4 15:55:12.325: INFO: (0) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 343.150099ms) +Jun 4 15:55:12.325: INFO: (0) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... (200; 55.853428ms) +Jun 4 15:55:12.523: INFO: (1) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 56.099772ms) +Jun 4 15:55:12.523: INFO: (1) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 56.430786ms) +Jun 4 15:55:12.557: INFO: (1) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 90.840644ms) +Jun 4 15:55:12.558: INFO: (1) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 91.297185ms) +Jun 4 15:55:12.558: INFO: (1) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 91.671627ms) +Jun 4 15:55:12.559: INFO: (1) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 92.560554ms) +Jun 4 15:55:12.559: INFO: (1) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 92.674772ms) +Jun 4 15:55:12.561: INFO: (1) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 94.27492ms) +Jun 4 15:55:12.561: INFO: (1) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 95.06693ms) +Jun 4 15:55:12.581: INFO: (2) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 19.704097ms) +Jun 4 15:55:12.584: INFO: (2) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 22.937794ms) +Jun 4 15:55:12.585: INFO: (2) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 23.003774ms) +Jun 4 15:55:12.585: INFO: (2) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 23.623679ms) +Jun 4 15:55:12.586: INFO: (2) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 24.628631ms) +Jun 4 15:55:12.586: INFO: (2) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 24.481454ms) +Jun 4 15:55:12.587: INFO: (2) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 24.825506ms) +Jun 4 15:55:12.604: INFO: (2) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... (200; 64.15521ms) +Jun 4 15:55:12.626: INFO: (2) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 64.133101ms) +Jun 4 15:55:12.626: INFO: (2) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 64.135464ms) +Jun 4 15:55:12.627: INFO: (2) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 65.349673ms) +Jun 4 15:55:12.628: INFO: (2) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 65.892271ms) +Jun 4 15:55:12.628: INFO: (2) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 65.801838ms) +Jun 4 15:55:12.628: INFO: (2) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 66.104031ms) +Jun 4 15:55:12.628: INFO: (2) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 65.941579ms) +Jun 4 15:55:12.707: INFO: (3) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 79.120225ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 93.85398ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 93.851007ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 93.784626ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 93.494537ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 93.763863ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 93.46842ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 93.588373ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 93.652698ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 94.211243ms) +Jun 4 15:55:12.722: INFO: (3) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test (200; 73.065552ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 73.256234ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 73.026877ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 73.210903ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 73.199104ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 73.270386ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 73.063412ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 73.525214ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 73.441969ms) +Jun 4 15:55:12.822: INFO: (4) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 73.637615ms) +Jun 4 15:55:12.851: INFO: (4) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 102.097778ms) +Jun 4 15:55:12.851: INFO: (4) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 101.859798ms) +Jun 4 15:55:12.851: INFO: (4) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 102.019455ms) +Jun 4 15:55:12.854: INFO: (4) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 105.371331ms) +Jun 4 15:55:12.869: INFO: (5) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... (200; 15.192922ms) +Jun 4 15:55:12.870: INFO: (5) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... 
(200; 15.275128ms) +Jun 4 15:55:12.870: INFO: (5) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 15.037072ms) +Jun 4 15:55:12.870: INFO: (5) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 15.127333ms) +Jun 4 15:55:12.870: INFO: (5) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 15.257324ms) +Jun 4 15:55:12.870: INFO: (5) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 15.499177ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 54.634647ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 54.478833ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 54.338027ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 54.591277ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 54.810272ms) +Jun 4 15:55:12.909: INFO: (5) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 54.413243ms) +Jun 4 15:55:12.929: INFO: (6) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 19.707001ms) +Jun 4 15:55:12.930: INFO: (6) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 20.379789ms) +Jun 4 15:55:12.930: INFO: (6) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 20.572233ms) +Jun 4 15:55:12.932: INFO: (6) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 22.307098ms) +Jun 4 15:55:12.937: INFO: (6) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 26.560799ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 27.696508ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 27.555924ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 27.970281ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 27.564357ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 27.748077ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 27.68478ms) +Jun 4 15:55:12.938: INFO: (6) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... (200; 62.546036ms) +Jun 4 15:55:13.012: INFO: (7) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 64.1493ms) +Jun 4 15:55:13.012: INFO: (7) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 64.255201ms) +Jun 4 15:55:13.012: INFO: (7) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 64.058363ms) +Jun 4 15:55:13.012: INFO: (7) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 64.379952ms) +Jun 4 15:55:13.012: INFO: (7) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test (200; 64.053541ms) +Jun 4 15:55:13.018: INFO: (7) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 69.89115ms) +Jun 4 15:55:13.026: INFO: (7) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 78.378177ms) +Jun 4 15:55:13.026: INFO: (7) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 78.316199ms) +Jun 4 15:55:13.026: INFO: (7) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 78.502904ms) +Jun 4 15:55:13.027: INFO: (7) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 78.223812ms) +Jun 4 15:55:13.054: INFO: (8) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 26.637941ms) +Jun 4 15:55:13.054: INFO: (8) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 27.090121ms) +Jun 4 15:55:13.054: INFO: (8) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 27.292894ms) +Jun 4 15:55:13.054: INFO: (8) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 26.951002ms) +Jun 4 15:55:13.058: INFO: (8) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 30.948997ms) +Jun 4 15:55:13.058: INFO: (8) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 30.982804ms) +Jun 4 15:55:13.058: INFO: (8) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 30.958032ms) +Jun 4 15:55:13.058: INFO: (8) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 31.187156ms) +Jun 4 15:55:13.058: INFO: (8) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... (200; 103.594852ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test (200; 103.124355ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 104.0841ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 104.053024ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 103.323719ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 103.24561ms) +Jun 4 15:55:13.211: INFO: (9) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 103.991988ms) +Jun 4 15:55:13.286: INFO: (9) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 179.163429ms) +Jun 4 15:55:13.286: INFO: (9) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 179.003764ms) +Jun 4 15:55:13.286: INFO: (9) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 179.137469ms) +Jun 4 15:55:13.286: INFO: (9) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 178.921878ms) +Jun 4 15:55:13.307: INFO: (10) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 19.6191ms) +Jun 4 15:55:13.307: INFO: (10) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 20.138692ms) +Jun 4 15:55:13.307: INFO: (10) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 20.029508ms) +Jun 4 15:55:13.309: INFO: (10) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 21.660013ms) +Jun 4 15:55:13.309: INFO: (10) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 22.048311ms) +Jun 4 15:55:13.309: INFO: (10) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... (200; 117.372547ms) +Jun 4 15:55:13.623: INFO: (11) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 117.471496ms) +Jun 4 15:55:13.623: INFO: (11) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 117.236904ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 217.099638ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 217.169733ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 217.108903ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 217.351943ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 217.519808ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 217.151266ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 217.391103ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 217.353012ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 217.257504ms) +Jun 4 15:55:13.723: INFO: (11) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 217.205881ms) +Jun 4 15:55:13.787: INFO: (12) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 64.24898ms) +Jun 4 15:55:13.904: INFO: (12) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 180.605205ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 181.693024ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 181.456264ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 181.213293ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 181.367413ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... (200; 181.128187ms) +Jun 4 15:55:13.905: INFO: (12) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 181.806756ms) +Jun 4 15:55:13.906: INFO: (12) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 182.131236ms) +Jun 4 15:55:14.005: INFO: (12) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 281.340365ms) +Jun 4 15:55:14.058: INFO: (12) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 334.156885ms) +Jun 4 15:55:14.058: INFO: (12) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 334.570339ms) +Jun 4 15:55:14.058: INFO: (12) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 334.749849ms) +Jun 4 15:55:14.058: INFO: (12) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 334.623595ms) +Jun 4 15:55:14.070: INFO: (13) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 11.571202ms) +Jun 4 15:55:14.072: INFO: (13) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 13.957524ms) +Jun 4 15:55:14.073: INFO: (13) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 14.658727ms) +Jun 4 15:55:14.075: INFO: (13) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... 
(200; 16.394368ms) +Jun 4 15:55:14.161: INFO: (13) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 101.83728ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 105.762917ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 105.757816ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test (200; 106.251136ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 106.36246ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 106.193652ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 105.958975ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 106.393232ms) +Jun 4 15:55:14.165: INFO: (13) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 106.224917ms) +Jun 4 15:55:14.322: INFO: (13) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 263.604949ms) +Jun 4 15:55:14.322: INFO: (13) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 263.460815ms) +Jun 4 15:55:14.564: INFO: (14) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 241.231785ms) +Jun 4 15:55:14.564: INFO: (14) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 240.905257ms) +Jun 4 15:55:14.565: INFO: (14) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 241.818336ms) +Jun 4 15:55:14.565: INFO: (14) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 241.655671ms) +Jun 4 15:55:14.565: INFO: (14) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 241.901436ms) +Jun 4 15:55:14.565: INFO: (14) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 242.246788ms) +Jun 4 15:55:14.566: INFO: (14) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 243.398271ms) +Jun 4 15:55:14.567: INFO: (14) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 243.828003ms) +Jun 4 15:55:14.567: INFO: (14) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 243.776652ms) +Jun 4 15:55:14.567: INFO: (14) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 244.139391ms) +Jun 4 15:55:14.567: INFO: (14) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... (200; 12.577425ms) +Jun 4 15:55:14.617: INFO: (15) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 13.212872ms) +Jun 4 15:55:14.617: INFO: (15) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... 
(200; 13.188371ms) +Jun 4 15:55:14.617: INFO: (15) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 12.78101ms) +Jun 4 15:55:14.623: INFO: (15) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 18.384051ms) +Jun 4 15:55:14.623: INFO: (15) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 18.300243ms) +Jun 4 15:55:14.623: INFO: (15) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test (200; 18.549239ms) +Jun 4 15:55:14.623: INFO: (15) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 18.662088ms) +Jun 4 15:55:14.623: INFO: (15) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 19.03091ms) +Jun 4 15:55:14.625: INFO: (15) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 20.382356ms) +Jun 4 15:55:14.625: INFO: (15) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 20.15036ms) +Jun 4 15:55:14.629: INFO: (15) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 24.493623ms) +Jun 4 15:55:14.629: INFO: (15) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 24.655989ms) +Jun 4 15:55:14.629: INFO: (15) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 24.630914ms) +Jun 4 15:55:14.708: INFO: (16) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 78.592434ms) +Jun 4 15:55:14.722: INFO: (16) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 92.909409ms) +Jun 4 15:55:14.722: INFO: (16) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... 
(200; 93.494414ms) +Jun 4 15:55:14.722: INFO: (16) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 93.406712ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 93.488109ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 93.377086ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 93.250535ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 93.397425ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 93.399031ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 93.361105ms) +Jun 4 15:55:14.723: INFO: (16) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 93.233505ms) +Jun 4 15:55:14.748: INFO: (16) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 118.970055ms) +Jun 4 15:55:14.748: INFO: (16) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 118.656634ms) +Jun 4 15:55:14.749: INFO: (16) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 120.020881ms) +Jun 4 15:55:14.751: INFO: (16) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 121.273671ms) +Jun 4 15:55:14.767: INFO: (17) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 15.934683ms) +Jun 4 15:55:14.767: INFO: (17) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 16.475054ms) +Jun 4 15:55:14.768: INFO: (17) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 16.478789ms) +Jun 4 15:55:14.768: INFO: (17) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 17.028507ms) +Jun 4 15:55:14.768: INFO: (17) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... 
(200; 72.011262ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 71.55816ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 71.720826ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 71.909182ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 71.682097ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 72.052118ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 72.003525ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 72.291609ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 72.005604ms) +Jun 4 15:55:14.823: INFO: (17) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 72.272715ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 17.915978ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: test<... (200; 18.552719ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 18.049879ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 18.112117ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:1080/proxy/: ... (200; 18.49709ms) +Jun 4 15:55:14.842: INFO: (18) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 18.583732ms) +Jun 4 15:55:14.843: INFO: (18) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 19.326628ms) +Jun 4 15:55:14.843: INFO: (18) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 18.84606ms) +Jun 4 15:55:14.844: INFO: (18) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 19.437844ms) +Jun 4 15:55:14.847: INFO: (18) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 22.718609ms) +Jun 4 15:55:14.849: INFO: (18) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 24.901778ms) +Jun 4 15:55:14.889: INFO: (18) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 65.284709ms) +Jun 4 15:55:14.889: INFO: (18) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 65.406015ms) +Jun 4 15:55:14.890: INFO: (18) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 66.056774ms) +Jun 4 15:55:14.890: INFO: (18) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 65.886515ms) +Jun 4 15:55:15.003: INFO: (19) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 113.167439ms) +Jun 4 15:55:15.003: INFO: (19) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:443/proxy/: ... 
(200; 113.309065ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname1/proxy/: tls baz (200; 126.358849ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb/proxy/: test (200; 126.297075ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 126.888192ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:462/proxy/: tls qux (200; 126.26564ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/pods/https:proxy-service-qtx9k-xspzb:460/proxy/: tls baz (200; 126.72459ms) +Jun 4 15:55:15.017: INFO: (19) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:1080/proxy/: test<... (200; 126.282589ms) +Jun 4 15:55:15.033: INFO: (19) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname1/proxy/: foo (200; 142.220615ms) +Jun 4 15:55:15.033: INFO: (19) /api/v1/namespaces/proxy-8936/services/https:proxy-service-qtx9k:tlsportname2/proxy/: tls qux (200; 142.54644ms) +Jun 4 15:55:15.033: INFO: (19) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname2/proxy/: bar (200; 142.632679ms) +Jun 4 15:55:15.033: INFO: (19) /api/v1/namespaces/proxy-8936/pods/http:proxy-service-qtx9k-xspzb:162/proxy/: bar (200; 142.274293ms) +Jun 4 15:55:15.033: INFO: (19) /api/v1/namespaces/proxy-8936/pods/proxy-service-qtx9k-xspzb:160/proxy/: foo (200; 142.528015ms) +Jun 4 15:55:15.034: INFO: (19) /api/v1/namespaces/proxy-8936/services/http:proxy-service-qtx9k:portname1/proxy/: foo (200; 143.683182ms) +Jun 4 15:55:15.044: INFO: (19) /api/v1/namespaces/proxy-8936/services/proxy-service-qtx9k:portname2/proxy/: bar (200; 153.20058ms) +STEP: deleting ReplicationController proxy-service-qtx9k in namespace proxy-8936, will wait for the garbage collector to delete the pods +Jun 4 15:55:15.131: INFO: Deleting ReplicationController proxy-service-qtx9k took: 23.479109ms +Jun 4 15:55:15.531: INFO: Terminating ReplicationController proxy-service-qtx9k pods took: 400.251278ms +[AfterEach] version v1 + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:55:21.832: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "proxy-8936" for this suite. 
+Jun 4 15:55:27.924: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:55:29.301: INFO: namespace proxy-8936 deletion completed in 7.46088565s + +• [SLOW TEST:32.559 seconds] +[sig-network] Proxy +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + version v1 + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/proxy.go:56 + should proxy through a service and a pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-storage] Downward API volume + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:55:29.301: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating the pod +Jun 4 15:55:33.953: INFO: Successfully updated pod "annotationupdate2d12fdf5-86e1-11e9-a2b6-96b18e3e6fac" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:55:36.109: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-4596" for this suite. 
+Jun 4 15:55:58.202: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:55:58.847: INFO: namespace downward-api-4596 deletion completed in 22.730316144s + +• [SLOW TEST:29.545 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:55:58.847: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81 +[It] should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:56:03.128: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubelet-test-2062" for this suite. 
+Jun 4 15:56:09.155: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:56:09.544: INFO: namespace kubelet-test-2062 deletion completed in 6.407659833s + +• [SLOW TEST:10.696 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78 + should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +S +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:56:09.544: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79 +Jun 4 15:56:09.730: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Jun 4 15:56:09.744: INFO: Waiting for terminating namespaces to be deleted... 
+Jun 4 15:56:09.748: INFO: +Logging pods the kubelet thinks is on node ip-172-31-11-48.eu-central-1.compute.internal before test +Jun 4 15:56:09.916: INFO: kube-proxy-8f464 from kube-system started at 2019-06-04 14:59:19 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: node-local-dns-bqd4m from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container node-cache ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: canal-dqcxs from kube-system started at 2019-06-04 14:59:19 +0000 UTC (3 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container calico-node ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: Container install-cni ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: coredns-568fd445fd-l7bhx from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container coredns ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-tmnxg from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: Container systemd-logs ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: node-exporter-fm98z from kube-system started at 2019-06-04 14:59:19 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: Container node-exporter ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: openvpn-client-5bbcf59684-r2rls from kube-system started at 2019-06-04 14:59:59 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container dnat-controller ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: Container openvpn-client ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: kubernetes-dashboard-57dcd9448b-pcpsp from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container kubernetes-dashboard ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: coredns-568fd445fd-q5bsd from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:09.916: INFO: Container coredns ready: true, restart count 0 +Jun 4 15:56:09.916: INFO: +Logging pods the kubelet thinks is on node ip-172-31-9-156.eu-central-1.compute.internal before test +Jun 4 15:56:10.021: INFO: node-exporter-2bq9l from kube-system started at 2019-06-04 14:59:24 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:10.021: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 15:56:10.021: INFO: Container node-exporter ready: true, restart count 0 +Jun 4 15:56:10.021: INFO: canal-5xshg from kube-system started at 2019-06-04 14:59:24 +0000 UTC (3 container statuses recorded) +Jun 4 15:56:10.021: INFO: Container calico-node ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: Container install-cni ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: kube-proxy-zvrkb from kube-system started at 2019-06-04 14:59:24 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:10.022: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 
15:56:10.022: INFO: node-local-dns-t84xd from kube-system started at 2019-06-04 15:00:24 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:10.022: INFO: Container node-cache ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-04 15:54:23 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:10.022: INFO: Container kube-sonobuoy ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-bmnlh from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:10.022: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: Container systemd-logs ready: true, restart count 0 +Jun 4 15:56:10.022: INFO: +Logging pods the kubelet thinks is on node ip-172-31-9-162.eu-central-1.compute.internal before test +Jun 4 15:56:10.448: INFO: node-exporter-gkmxz from kube-system started at 2019-06-04 14:59:30 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: Container node-exporter ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: kube-proxy-htwg4 from kube-system started at 2019-06-04 14:59:30 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: canal-6zg8m from kube-system started at 2019-06-04 14:59:30 +0000 UTC (3 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container calico-node ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: Container install-cni ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: node-local-dns-wslm4 from kube-system started at 2019-06-04 15:00:11 +0000 UTC (1 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container node-cache ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: sonobuoy-e2e-job-eb1ef483a117445f from heptio-sonobuoy started at 2019-06-04 15:54:24 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container e2e ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-psdr6 from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 15:56:10.448: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 15:56:10.448: INFO: Container systemd-logs ready: true, restart count 0 +[It] validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Trying to launch a pod without a label to get a node which can launch it. +STEP: Explicitly delete pod here to free the resource it takes. +STEP: Trying to apply a random label on the found node. +STEP: verifying the node has the label kubernetes.io/e2e-46cafbdf-86e1-11e9-a2b6-96b18e3e6fac 42 +STEP: Trying to relaunch the pod, now with labels. 
+STEP: removing the label kubernetes.io/e2e-46cafbdf-86e1-11e9-a2b6-96b18e3e6fac off the node ip-172-31-9-156.eu-central-1.compute.internal +STEP: verifying the node doesn't have the label kubernetes.io/e2e-46cafbdf-86e1-11e9-a2b6-96b18e3e6fac +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:56:14.691: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-7486" for this suite. +Jun 4 15:56:22.724: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:56:23.239: INFO: namespace sched-pred-7486 deletion completed in 8.541574594s +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70 + +• [SLOW TEST:13.695 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22 + validates that NodeSelector is respected if matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run default + should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:56:23.239: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run default + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1318 +[It] should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 4 15:56:23.335: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-6089' +Jun 4 15:56:23.439: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 4 15:56:23.439: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n" +STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created +[AfterEach] [k8s.io] Kubectl run default + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1324 +Jun 4 15:56:25.457: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete deployment e2e-test-nginx-deployment --namespace=kubectl-6089' +Jun 4 15:56:25.717: INFO: stderr: "" +Jun 4 15:56:25.717: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:56:25.717: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6089" for this suite. +Jun 4 15:56:31.930: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:56:32.449: INFO: namespace kubectl-6089 deletion completed in 6.699194105s + +• [SLOW TEST:9.209 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run default + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create an rc or deployment from an image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:56:32.449: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating secret with name secret-test-52c03d9a-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 15:56:32.587: INFO: Waiting up to 5m0s for pod "pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac" in namespace "secrets-7246" to be "success or failure" +Jun 4 15:56:32.630: INFO: Pod "pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 42.999387ms +Jun 4 15:56:34.734: INFO: Pod "pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.146850959s +STEP: Saw pod success +Jun 4 15:56:34.734: INFO: Pod "pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:56:34.739: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac container secret-volume-test: +STEP: delete the pod +Jun 4 15:56:34.972: INFO: Waiting for pod pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:56:35.030: INFO: Pod pod-secrets-52c2e399-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:56:35.030: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "secrets-7246" for this suite. +Jun 4 15:56:41.069: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:56:41.595: INFO: namespace secrets-7246 deletion completed in 6.557778472s + +• [SLOW TEST:9.146 seconds] +[sig-storage] Secrets +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl rolling-update + should support rolling-update to same image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:56:41.597: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl rolling-update + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1414 +[It] should support rolling-update to same image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 4 15:56:41.645: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-762' +Jun 4 15:56:41.752: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. 
Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 4 15:56:41.752: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n" +STEP: verifying the rc e2e-test-nginx-rc was created +Jun 4 15:56:41.762: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 0 spec.replicas 1 status.replicas 0 +Jun 4 15:56:41.776: INFO: Waiting for rc e2e-test-nginx-rc to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0 +STEP: rolling-update to same image controller +Jun 4 15:56:41.787: INFO: scanned /root for discovery docs: +Jun 4 15:56:41.787: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=kubectl-762' +Jun 4 15:56:58.124: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n" +Jun 4 15:56:58.124: INFO: stdout: "Created e2e-test-nginx-rc-2686648b78748766c8e64e548a377090\nScaling up e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n" +Jun 4 15:56:58.124: INFO: stdout: "Created e2e-test-nginx-rc-2686648b78748766c8e64e548a377090\nScaling up e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-2686648b78748766c8e64e548a377090 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n" +STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up. +Jun 4 15:56:58.124: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=kubectl-762' +Jun 4 15:56:58.284: INFO: stderr: "" +Jun 4 15:56:58.284: INFO: stdout: "e2e-test-nginx-rc-2686648b78748766c8e64e548a377090-5rs76 " +Jun 4 15:56:58.284: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods e2e-test-nginx-rc-2686648b78748766c8e64e548a377090-5rs76 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-762' +Jun 4 15:56:58.360: INFO: stderr: "" +Jun 4 15:56:58.360: INFO: stdout: "true" +Jun 4 15:56:58.360: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods e2e-test-nginx-rc-2686648b78748766c8e64e548a377090-5rs76 -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-762' +Jun 4 15:56:58.463: INFO: stderr: "" +Jun 4 15:56:58.463: INFO: stdout: "docker.io/library/nginx:1.14-alpine" +Jun 4 15:56:58.463: INFO: e2e-test-nginx-rc-2686648b78748766c8e64e548a377090-5rs76 is verified up and running +[AfterEach] [k8s.io] Kubectl rolling-update + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1420 +Jun 4 15:56:58.463: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete rc e2e-test-nginx-rc --namespace=kubectl-762' +Jun 4 15:56:58.573: INFO: stderr: "" +Jun 4 15:56:58.573: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:56:58.574: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-762" for this suite. +Jun 4 15:57:20.644: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:57:21.342: INFO: namespace kubectl-762 deletion completed in 22.754794391s + +• [SLOW TEST:39.745 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl rolling-update + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should support rolling-update to same image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:57:21.342: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-6fda2069-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 15:57:21.434: INFO: Waiting up to 5m0s for pod "pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac" in namespace "configmap-4907" to be "success or failure" +Jun 4 15:57:21.440: INFO: Pod "pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.230755ms +Jun 4 15:57:23.445: INFO: Pod "pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010654164s +Jun 4 15:57:25.450: INFO: Pod "pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.015921333s +STEP: Saw pod success +Jun 4 15:57:25.450: INFO: Pod "pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:57:25.456: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac container configmap-volume-test: +STEP: delete the pod +Jun 4 15:57:25.635: INFO: Waiting for pod pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:57:25.640: INFO: Pod pod-configmaps-6fdb2b44-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:57:25.640: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4907" for this suite. +Jun 4 15:57:31.744: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:57:32.464: INFO: namespace configmap-4907 deletion completed in 6.818355085s + +• [SLOW TEST:11.122 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:57:32.465: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 4 15:57:32.518: INFO: Waiting up to 5m0s for pod "downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac" in namespace "projected-6765" to be "success or failure" +Jun 4 15:57:32.536: INFO: Pod "downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. 
Elapsed: 17.6366ms +Jun 4 15:57:34.542: INFO: Pod "downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023709196s +Jun 4 15:57:36.548: INFO: Pod "downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029937021s +STEP: Saw pod success +Jun 4 15:57:36.548: INFO: Pod "downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:57:36.639: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac container client-container: +STEP: delete the pod +Jun 4 15:57:36.737: INFO: Waiting for pod downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:57:36.741: INFO: Pod downwardapi-volume-767b1665-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:57:36.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6765" for this suite. +Jun 4 15:57:42.766: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:57:43.199: INFO: namespace projected-6765 deletion completed in 6.45194027s + +• [SLOW TEST:10.734 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu request [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[sig-storage] Projected configMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:57:43.199: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with configMap that has name projected-configmap-test-upd-7cf0c65e-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating the pod +STEP: Updating configmap projected-configmap-test-upd-7cf0c65e-86e1-11e9-a2b6-96b18e3e6fac +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:59:09.818: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-78" for this suite. 
+Jun 4 15:59:31.843: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:59:32.399: INFO: namespace projected-78 deletion completed in 22.57489997s + +• [SLOW TEST:109.200 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-node] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:59:32.399: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap configmap-8338/configmap-test-be16164c-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 15:59:32.655: INFO: Waiting up to 5m0s for pod "pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac" in namespace "configmap-8338" to be "success or failure" +Jun 4 15:59:32.663: INFO: Pod "pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.693283ms +Jun 4 15:59:34.750: INFO: Pod "pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.094206527s +Jun 4 15:59:36.756: INFO: Pod "pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.100704163s +STEP: Saw pod success +Jun 4 15:59:36.756: INFO: Pod "pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:59:36.765: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac container env-test: +STEP: delete the pod +Jun 4 15:59:36.923: INFO: Waiting for pod pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:59:36.928: INFO: Pod pod-configmaps-be173213-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:59:36.929: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8338" for this suite. 
+Jun 4 15:59:42.975: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:59:43.756: INFO: namespace configmap-8338 deletion completed in 6.802868445s + +• [SLOW TEST:11.357 seconds] +[sig-node] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:59:43.756: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-map-c4c27939-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 15:59:43.854: INFO: Waiting up to 5m0s for pod "pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac" in namespace "configmap-1057" to be "success or failure" +Jun 4 15:59:43.859: INFO: Pod "pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.713594ms +Jun 4 15:59:45.871: INFO: Pod "pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016654418s +STEP: Saw pod success +Jun 4 15:59:45.871: INFO: Pod "pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 15:59:45.875: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac container configmap-volume-test: +STEP: delete the pod +Jun 4 15:59:45.947: INFO: Waiting for pod pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 15:59:45.952: INFO: Pod pod-configmaps-c4c3aabb-86e1-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 15:59:45.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-1057" for this suite. 
+Jun 4 15:59:51.990: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 15:59:52.373: INFO: namespace configmap-1057 deletion completed in 6.411493775s + +• [SLOW TEST:8.617 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 15:59:52.374: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the pod +Jun 4 15:59:52.422: INFO: PodSpec: initContainers in spec.initContainers +Jun 4 16:00:41.666: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-c9e0c3b2-86e1-11e9-a2b6-96b18e3e6fac", GenerateName:"", Namespace:"init-container-434", SelfLink:"/api/v1/namespaces/init-container-434/pods/pod-init-c9e0c3b2-86e1-11e9-a2b6-96b18e3e6fac", UID:"c9eb0913-86e1-11e9-83c6-06284416dbe9", ResourceVersion:"12168", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63695260792, loc:(*time.Location)(0x8a060e0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"422498831"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-kvxqf", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc002553100), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), 
FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-kvxqf", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-kvxqf", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-kvxqf", ReadOnly:true, 
MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0023e1d78), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"ip-172-31-9-162.eu-central-1.compute.internal", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0027948a0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0023e1df0)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc0023e1e10)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc0023e1e18), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc0023e1e1c)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695260792, loc:(*time.Location)(0x8a060e0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695260792, loc:(*time.Location)(0x8a060e0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695260792, loc:(*time.Location)(0x8a060e0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695260792, loc:(*time.Location)(0x8a060e0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"172.31.9.162", PodIP:"172.25.3.12", StartTime:(*v1.Time)(0xc0022a54a0), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0027bfab0)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0027bfb20)}, Ready:false, RestartCount:3, 
Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://7f24dd1c298d2588e33e52d886215e2fc8492052ef516b982989246404cc59a6"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0022a5620), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0022a55a0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}} +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:00:41.667: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "init-container-434" for this suite. +Jun 4 16:01:03.799: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:01:04.372: INFO: namespace init-container-434 deletion completed in 22.698590147s + +• [SLOW TEST:71.998 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should not start app containers if init containers fail on a RestartAlways pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:01:04.372: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-upd-f4d13290-86e1-11e9-a2b6-96b18e3e6fac +STEP: Creating the pod +STEP: Updating configmap configmap-test-upd-f4d13290-86e1-11e9-a2b6-96b18e3e6fac +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + 
/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:02:36.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-452" for this suite. +Jun 4 16:02:48.361: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:02:48.697: INFO: namespace configmap-452 deletion completed in 12.632034848s + +• [SLOW TEST:104.325 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:02:48.697: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 4 16:02:48.751: INFO: Waiting up to 5m0s for pod "downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-2801" to be "success or failure" +Jun 4 16:02:48.757: INFO: Pod "downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.896971ms +Jun 4 16:02:50.766: INFO: Pod "downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.015383408s
+STEP: Saw pod success
+Jun 4 16:02:50.766: INFO: Pod "downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:02:50.861: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac container client-container:
+STEP: delete the pod
+Jun 4 16:02:51.044: INFO: Waiting for pod downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:02:51.054: INFO: Pod downwardapi-volume-32f8f474-86e2-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:02:51.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-2801" for this suite.
+Jun 4 16:02:57.085: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:02:57.770: INFO: namespace downward-api-2801 deletion completed in 6.705641635s
+
+• [SLOW TEST:9.073 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+ should provide container's cpu limit [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment
+ deployment should support proportional scaling [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:02:57.771: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support proportional scaling [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun 4 16:02:57.964: INFO: Creating deployment "nginx-deployment"
+Jun 4 16:02:57.972: INFO: Waiting for observed generation 1
+Jun 4 16:02:59.983: INFO: Waiting for all required pods to come up
+Jun 4 16:02:59.989: INFO: Pod name nginx: Found 10 pods out of 10
+STEP: ensuring each pod is running
+Jun 4 16:03:04.011: INFO: Waiting for deployment "nginx-deployment" to complete
+Jun 4 16:03:04.023: INFO: Updating deployment "nginx-deployment" with a non-existent image
+Jun 4 16:03:04.062: INFO: Updating deployment nginx-deployment
+Jun 4 16:03:04.062: INFO: Waiting for observed generation 2
+Jun 4 16:03:06.168: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+Jun 4 16:03:06.176: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+Jun 4 16:03:06.181: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun 4 16:03:06.199: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+Jun 4 16:03:06.199: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+Jun 4 16:03:06.204: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+Jun 4 16:03:06.213: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas
+Jun 4 16:03:06.213: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30
+Jun 4 16:03:06.228: INFO: Updating deployment nginx-deployment
+Jun 4 16:03:06.228: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas
+Jun 4 16:03:06.243: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+Jun 4 16:03:08.264: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun 4 16:03:08.275: INFO: Deployment "nginx-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:deployment-5214,SelfLink:/apis/apps/v1/namespaces/deployment-5214/deployments/nginx-deployment,UID:387e140b-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12852,Generation:3,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:33,UpdatedReplicas:13,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[{Available False 2019-06-04 16:03:06 +0000 UTC 2019-06-04 16:03:06 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-06-04 16:03:06 +0000 UTC 2019-06-04 16:02:57 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-5f9595f595" is progressing.}],ReadyReplicas:8,CollisionCount:nil,},}
+
+Jun 4 16:03:08.284: INFO: New ReplicaSet "nginx-deployment-5f9595f595" of Deployment "nginx-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595,GenerateName:,Namespace:deployment-5214,SelfLink:/apis/apps/v1/namespaces/deployment-5214/replicasets/nginx-deployment-5f9595f595,UID:3c1c3d05-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12843,Generation:3,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment 387e140b-86e2-11e9-83c6-06284416dbe9 0xc00232a5f7 0xc00232a5f8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:13,FullyLabeledReplicas:13,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun 4 16:03:08.284: INFO: All old ReplicaSets of Deployment "nginx-deployment":
+Jun 4 16:03:08.284: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8,GenerateName:,Namespace:deployment-5214,SelfLink:/apis/apps/v1/namespaces/deployment-5214/replicasets/nginx-deployment-6f478d8d8,UID:387efffc-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12851,Generation:3,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment 387e140b-86e2-11e9-83c6-06284416dbe9 0xc00232a6c7 0xc00232a6c8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:20,FullyLabeledReplicas:20,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},}
+Jun 4 16:03:08.296: INFO: Pod "nginx-deployment-5f9595f595-7wm5v" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-7wm5v,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-7wm5v,UID:3d6f5267-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12869,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc000b710a7 0xc000b710a8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc000b711e0} {node.kubernetes.io/unreachable Exists NoExecute 0xc000b71200}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.297: INFO: Pod "nginx-deployment-5f9595f595-8pn5k" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-8pn5k,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-8pn5k,UID:3c2b1819-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12907,Generation:0,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc000b712d0 0xc000b712d1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc000b71350} {node.kubernetes.io/unreachable Exists NoExecute 0xc000b71370}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:04 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.297: INFO: Pod "nginx-deployment-5f9595f595-9g5tl" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-9g5tl,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-9g5tl,UID:3c1f0034-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12908,Generation:0,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc000b71540 0xc000b71541}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc000b715d0} {node.kubernetes.io/unreachable Exists NoExecute 0xc000b715f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:04 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.299: INFO: Pod "nginx-deployment-5f9595f595-cqpz5" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-cqpz5,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-cqpz5,UID:3d7167bd-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12865,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc000b71770 0xc000b71771}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc000b71850} {node.kubernetes.io/unreachable Exists NoExecute 0xc000b718c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.299: INFO: Pod "nginx-deployment-5f9595f595-cs7zg" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-cs7zg,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-cs7zg,UID:3d718700-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12904,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc000b71a90 0xc000b71a91}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc000b71b30} {node.kubernetes.io/unreachable Exists NoExecute 0xc000b71c00}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.299: INFO: Pod "nginx-deployment-5f9595f595-h5rrr" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-h5rrr,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-h5rrr,UID:3c1d5e40-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12910,Generation:0,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc001816050 0xc001816051}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0018160c0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018160e0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.37,StartTime:2019-06-04 16:03:04 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = Error response from daemon: manifest for nginx:404 not found,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.299: INFO: Pod "nginx-deployment-5f9595f595-mr2fz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-mr2fz,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-mr2fz,UID:3d6f09c9-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12849,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc0018161d0 0xc0018161d1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816240} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816260}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.299: INFO: Pod "nginx-deployment-5f9595f595-ng2jn" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-ng2jn,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-ng2jn,UID:3d6d16fc-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12919,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc001816330 0xc001816331}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0018163a0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018163c0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:172.25.0.28,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-5f9595f595-rjgpr" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-rjgpr,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-rjgpr,UID:3d73fa8b-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12894,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc0018164a0 0xc0018164a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816510} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816530}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-5f9595f595-rx7mp" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-rx7mp,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-rx7mp,UID:3d71dded-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12875,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc001816600 0xc001816601}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816670} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816690}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-5f9595f595-ts48g" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-ts48g,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-ts48g,UID:3d71ce05-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12903,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc001816760 0xc001816761}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0018167d0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018167f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-5f9595f595-vp6nb" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-vp6nb,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-vp6nb,UID:3c29b521-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12780,Generation:0,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc0018168c0 0xc0018168c1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816930} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816950}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.38,StartTime:2019-06-04 16:03:04 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-5f9595f595-zx7qw" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-5f9595f595-zx7qw,GenerateName:nginx-deployment-5f9595f595-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-5f9595f595-zx7qw,UID:3c1f0adc-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12770,Generation:0,CreationTimestamp:2019-06-04 16:03:04 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 5f9595f595,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-5f9595f595 3c1c3d05-86e2-11e9-83c6-06284416dbe9 0xc001816a30 0xc001816a31}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816aa0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816ac0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:04 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:04 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404 }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.300: INFO: Pod "nginx-deployment-6f478d8d8-2sdb7" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-2sdb7,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-2sdb7,UID:3d7175f8-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12868,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001816b90 0xc001816b91}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816bf0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816c10}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.301: INFO: Pod "nginx-deployment-6f478d8d8-45kk8" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-45kk8,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-45kk8,UID:3d6d87af-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12847,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001816cd7 0xc001816cd8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816d40} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816d60}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.301: INFO: Pod "nginx-deployment-6f478d8d8-66xj5" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-66xj5,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-66xj5,UID:3886c041-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12715,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001816e27 0xc001816e28}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816e90} {node.kubernetes.io/unreachable Exists NoExecute 0xc001816eb0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.35,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:01 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://a65677fe4daf7e6ea1e1ac12bf3fc130e005ac609ac88b5adee4bf250cc4e5c0}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.302: INFO: Pod "nginx-deployment-6f478d8d8-6w87k" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-6w87k,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-6w87k,UID:3886a694-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12667,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001816f90 0xc001816f91}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001816ff0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817010}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:172.25.3.15,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:00 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://a410ef0707929ca94549a6ef6a7b403825635b358c3f2a3f94464b6255f9b02e}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.302: INFO: Pod "nginx-deployment-6f478d8d8-77n57" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-77n57,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-77n57,UID:3d6f6299-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12857,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc0018170e0 0xc0018170e1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817140} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817160}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+Jun 4 16:03:08.304: INFO: Pod "nginx-deployment-6f478d8d8-fsgzx" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-fsgzx,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-fsgzx,UID:3d716e71-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12880,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817227 0xc001817228}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817290} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018172b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.304: INFO: Pod "nginx-deployment-6f478d8d8-g8mmj" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-g8mmj,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-g8mmj,UID:3d714c11-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12882,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817377 0xc001817378}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0018173e0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817400}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.305: INFO: Pod "nginx-deployment-6f478d8d8-gthgw" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-gthgw,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-gthgw,UID:3d714f2d-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12887,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc0018174c7 0xc0018174c8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817530} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817550}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.305: INFO: Pod "nginx-deployment-6f478d8d8-hvfmt" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-hvfmt,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-hvfmt,UID:3d6f5f78-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12859,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817617 0xc001817618}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817680} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018176a0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.305: INFO: Pod "nginx-deployment-6f478d8d8-jsxqc" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-jsxqc,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-jsxqc,UID:3d6f4e43-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12862,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817767 0xc001817768}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc0018177d0} {node.kubernetes.io/unreachable Exists NoExecute 0xc0018177f0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.305: INFO: Pod "nginx-deployment-6f478d8d8-jxbgs" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-jxbgs,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-jxbgs,UID:3d6dab03-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12854,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc0018178c7 0xc0018178c8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817930} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817950}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.305: INFO: Pod "nginx-deployment-6f478d8d8-lcpdx" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-lcpdx,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-lcpdx,UID:3882445f-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12688,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817a17 0xc001817a18}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817a80} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817aa0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:172.25.0.26,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:00 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://74a6507bc4c493ba5f9c982f6c140a22623e16150f6e70bc5fe518dc497385e8}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-phm5b" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-phm5b,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-phm5b,UID:3d6c1064-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12918,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817b70 0xc001817b71}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817bd0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817bf0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.39,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-r2m4x" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-r2m4x,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-r2m4x,UID:38855360-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12671,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817cc0 0xc001817cc1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817d20} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817d40}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:172.25.3.16,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:00 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://95f9c08eb3e7f09c0603ec79d0435edc33257f44ee65ee44f16f529fd6db47ef}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-rdxts" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-rdxts,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-rdxts,UID:3885516c-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12679,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817e10 0xc001817e11}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817e70} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817e90}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:172.25.0.25,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:00 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://a17e76386b7ad820081324ce2d4c883a20ae5c9752f3e524de48f8767358e2e3}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-v9fbw" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-v9fbw,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-v9fbw,UID:3885745f-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12703,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc001817f60 0xc001817f61}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001817fc0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001817fe0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:01 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:01 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.34,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:01 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://963cb7493e31698c2306a45fcb57b81bfb40550e7de7e4a3d277557ba5d91cdd}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-vj2cf" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-vj2cf,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-vj2cf,UID:3d6f57a7-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12873,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc00199a0b0 0xc00199a0b1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00199a110} {node.kubernetes.io/unreachable Exists NoExecute 0xc00199a130}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-wq5dm" is not available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-wq5dm,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-wq5dm,UID:3d715bce-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12889,Generation:0,CreationTimestamp:2019-06-04 16:03:06 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc00199a1f7 0xc00199a1f8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00199a260} {node.kubernetes.io/unreachable Exists NoExecute 0xc00199a280}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:06 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:,StartTime:2019-06-04 16:03:06 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.306: INFO: Pod "nginx-deployment-6f478d8d8-xsjl8" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-xsjl8,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-xsjl8,UID:388196ec-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12663,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc00199a347 0xc00199a348}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-162.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00199a3b0} {node.kubernetes.io/unreachable Exists NoExecute 0xc00199a3d0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.162,PodIP:172.25.3.14,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:02:59 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://23b0dd4d27c6321031bf61ad60ae1061b2512e2b75e260e4a73681e4672521dc}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +Jun 4 16:03:08.307: INFO: Pod "nginx-deployment-6f478d8d8-xtw4b" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-6f478d8d8-xtw4b,GenerateName:nginx-deployment-6f478d8d8-,Namespace:deployment-5214,SelfLink:/api/v1/namespaces/deployment-5214/pods/nginx-deployment-6f478d8d8-xtw4b,UID:3886ba9a-86e2-11e9-83c6-06284416dbe9,ResourceVersion:12683,Generation:0,CreationTimestamp:2019-06-04 16:02:58 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 6f478d8d8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-6f478d8d8 387efffc-86e2-11e9-83c6-06284416dbe9 0xc00199a4a0 0xc00199a4a1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-xns2r {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-xns2r,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [{default-token-xns2r true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-11-48.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc00199a500} {node.kubernetes.io/unreachable Exists NoExecute 0xc00199a520}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:03:00 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:02:58 +0000 UTC }],Message:,Reason:,HostIP:172.31.11.48,PodIP:172.25.0.27,StartTime:2019-06-04 16:02:58 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-06-04 16:03:00 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://77922c7b9ba57bf3f11e446cbd411f5e6e0bf7355b798a1089664dc026869491}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:03:08.307: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-5214" for this suite. 
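The pod dumps above come from the proportional-scaling check: the Deployment is scaled while a rollout is still in progress, so the deployment controller splits the added replicas across the old and new ReplicaSets in proportion to their current sizes, which is why the listing mixes available and not-yet-available pods. The same behaviour can be reproduced by hand; a minimal sketch, assuming a working kubeconfig (deployment name and image tags are illustrative):

```
# Sketch: scale a Deployment mid-rollout and watch both ReplicaSets grow
# proportionally rather than all new replicas landing on one ReplicaSet.
$ kubectl create deployment nginx --image=nginx:1.14-alpine
$ kubectl scale deployment/nginx --replicas=10
$ kubectl set image deployment/nginx nginx=nginx:1.15-alpine  # start a rollout
$ kubectl scale deployment/nginx --replicas=20                # scale while rolling
$ kubectl get rs -l app=nginx                                 # added replicas split across old and new RS
```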
+Jun 4 16:03:16.374: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:03:17.109: INFO: namespace deployment-5214 deletion completed in 8.794452031s + +• [SLOW TEST:19.338 seconds] +[sig-apps] Deployment +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should support proportional scaling [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Guestbook application + should create and stop a working application [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:03:17.109: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should create and stop a working application [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating all guestbook components +Jun 4 16:03:17.165: INFO: apiVersion: v1 +kind: Service +metadata: + name: redis-slave + labels: + app: redis + role: slave + tier: backend +spec: + ports: + - port: 6379 + selector: + app: redis + role: slave + tier: backend + +Jun 4 16:03:17.165: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:18.340: INFO: stderr: "" +Jun 4 16:03:18.340: INFO: stdout: "service/redis-slave created\n" +Jun 4 16:03:18.340: INFO: apiVersion: v1 +kind: Service +metadata: + name: redis-master + labels: + app: redis + role: master + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: master + tier: backend + +Jun 4 16:03:18.340: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:18.917: INFO: stderr: "" +Jun 4 16:03:18.917: INFO: stdout: "service/redis-master created\n" +Jun 4 16:03:18.917: INFO: apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
+ # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + +Jun 4 16:03:18.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:19.484: INFO: stderr: "" +Jun 4 16:03:19.485: INFO: stdout: "service/frontend created\n" +Jun 4 16:03:19.485: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: php-redis + image: gcr.io/google-samples/gb-frontend:v6 + resources: + requests: + cpu: 100m + memory: 100Mi + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access environment variables to find service host + # info, comment out the 'value: dns' line above, and uncomment the + # line below: + # value: env + ports: + - containerPort: 80 + +Jun 4 16:03:19.485: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:20.376: INFO: stderr: "" +Jun 4 16:03:20.376: INFO: stdout: "deployment.apps/frontend created\n" +Jun 4 16:03:20.377: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-master +spec: + replicas: 1 + selector: + matchLabels: + app: redis + role: master + tier: backend + template: + metadata: + labels: + app: redis + role: master + tier: backend + spec: + containers: + - name: master + image: gcr.io/kubernetes-e2e-test-images/redis:1.0 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +Jun 4 16:03:20.377: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:20.993: INFO: stderr: "" +Jun 4 16:03:20.993: INFO: stdout: "deployment.apps/redis-master created\n" +Jun 4 16:03:20.993: INFO: apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-slave +spec: + replicas: 2 + selector: + matchLabels: + app: redis + role: slave + tier: backend + template: + metadata: + labels: + app: redis + role: slave + tier: backend + spec: + containers: + - name: slave + image: gcr.io/google-samples/gb-redisslave:v3 + resources: + requests: + cpu: 100m + memory: 100Mi + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access an environment variable to find the master + # service's host, comment out the 'value: dns' line above, and + # uncomment the line below: + # value: env + ports: + - containerPort: 6379 + +Jun 4 16:03:20.993: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-9462' +Jun 4 16:03:21.526: INFO: stderr: "" +Jun 4 16:03:21.526: INFO: stdout: "deployment.apps/redis-slave created\n" +STEP: validating guestbook app +Jun 4 16:03:21.526: INFO: Waiting for all frontend pods to be Running. +Jun 4 16:03:41.584: INFO: Waiting for frontend to serve content. +Jun 4 16:03:41.696: INFO: Trying to add a new entry to the guestbook. +Jun 4 16:03:41.783: INFO: Verifying that added entry can be retrieved. 
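The validation phase above waits for the three frontend pods to reach Running, then adds a guestbook entry through the frontend Service and reads it back. A rough equivalent from inside the cluster, as a sketch only (the probe pods and the entry key/value are illustrative, not the suite's literal requests):

```
# Write an entry through the guestbook frontend and read it back.
$ kubectl run probe --image=busybox --restart=Never --rm -it --namespace=kubectl-9462 -- \
    wget -qO- 'http://frontend/guestbook.php?cmd=set&key=messages&value=hello'
$ kubectl run probe2 --image=busybox --restart=Never --rm -it --namespace=kubectl-9462 -- \
    wget -qO- 'http://frontend/guestbook.php?cmd=get&key=messages'
```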
+STEP: using delete to clean up resources +Jun 4 16:03:42.065: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:42.253: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:42.254: INFO: stdout: "service \"redis-slave\" force deleted\n" +STEP: using delete to clean up resources +Jun 4 16:03:42.254: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:42.623: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:42.623: INFO: stdout: "service \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +Jun 4 16:03:42.624: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:42.895: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:42.895: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Jun 4 16:03:42.895: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:42.984: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:42.984: INFO: stdout: "deployment.apps \"frontend\" force deleted\n" +STEP: using delete to clean up resources +Jun 4 16:03:42.984: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:43.237: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:43.237: INFO: stdout: "deployment.apps \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +Jun 4 16:03:43.237: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-9462' +Jun 4 16:03:43.327: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +Jun 4 16:03:43.327: INFO: stdout: "deployment.apps \"redis-slave\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:03:43.327: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9462" for this suite. 
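The cleanup phase pipes the same manifests back into `kubectl delete` with `--grace-period=0 --force`, which removes the API objects immediately instead of waiting for graceful pod termination; the repeated warning in the log is expected with these flags. The equivalent by hand (the manifest file name is illustrative):

```
# Force-delete without waiting for graceful termination; kubectl prints the
# same "Immediate deletion does not wait..." warning seen in the log above.
$ kubectl delete -f guestbook.yaml --grace-period=0 --force --namespace=kubectl-9462
```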
+Jun 4 16:04:23.375: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:04:23.968: INFO: namespace kubectl-9462 deletion completed in 40.633584361s + +• [SLOW TEST:66.859 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Guestbook application + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create and stop a working application [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:04:23.968: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward api env vars +Jun 4 16:04:24.057: INFO: Waiting up to 5m0s for pod "downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-7163" to be "success or failure" +Jun 4 16:04:24.065: INFO: Pod "downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.704984ms +Jun 4 16:04:26.079: INFO: Pod "downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021728991s +Jun 4 16:04:28.172: INFO: Pod "downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.114868316s +STEP: Saw pod success +Jun 4 16:04:28.172: INFO: Pod "downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:04:28.177: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac container dapi-container: +STEP: delete the pod +Jun 4 16:04:28.268: INFO: Waiting for pod downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:04:28.273: INFO: Pod downward-api-6bc6a339-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:04:28.273: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-7163" for this suite. 
+Jun 4 16:04:34.372: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:04:34.728: INFO: namespace downward-api-7163 deletion completed in 6.448899308s + +• [SLOW TEST:10.760 seconds] +[sig-node] Downward API +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:04:34.728: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test env composition +Jun 4 16:04:34.870: INFO: Waiting up to 5m0s for pod "var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac" in namespace "var-expansion-8221" to be "success or failure" +Jun 4 16:04:34.876: INFO: Pod "var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.162794ms +Jun 4 16:04:36.881: INFO: Pod "var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011714758s +Jun 4 16:04:38.900: INFO: Pod "var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030368152s +STEP: Saw pod success +Jun 4 16:04:38.900: INFO: Pod "var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:04:38.906: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac container dapi-container: +STEP: delete the pod +Jun 4 16:04:39.037: INFO: Waiting for pod var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:04:39.042: INFO: Pod var-expansion-722c255f-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:04:39.042: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-8221" for this suite. 
+Jun 4 16:04:45.075: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:04:45.514: INFO: namespace var-expansion-8221 deletion completed in 6.465857724s + +• [SLOW TEST:10.786 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should allow composing env vars into new env vars [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:04:45.514: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod pod-subpath-test-configmap-2tc2 +STEP: Creating a pod to test atomic-volume-subpath +Jun 4 16:04:45.593: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-2tc2" in namespace "subpath-5366" to be "success or failure" +Jun 4 16:04:45.602: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Pending", Reason="", readiness=false. Elapsed: 8.97121ms +Jun 4 16:04:47.674: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Pending", Reason="", readiness=false. Elapsed: 2.080223855s +Jun 4 16:04:49.681: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 4.087638944s +Jun 4 16:04:51.780: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 6.18625744s +Jun 4 16:04:53.800: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 8.206693566s +Jun 4 16:04:55.810: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 10.216243347s +Jun 4 16:04:57.815: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 12.222148173s +Jun 4 16:04:59.871: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 14.277541372s +Jun 4 16:05:01.972: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 16.37847602s +Jun 4 16:05:04.001: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. Elapsed: 18.407709996s +Jun 4 16:05:06.016: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.422799493s +Jun 4 16:05:08.022: INFO: Pod "pod-subpath-test-configmap-2tc2": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.428725282s +STEP: Saw pod success +Jun 4 16:05:08.022: INFO: Pod "pod-subpath-test-configmap-2tc2" satisfied condition "success or failure" +Jun 4 16:05:08.027: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-subpath-test-configmap-2tc2 container test-container-subpath-configmap-2tc2: +STEP: delete the pod +Jun 4 16:05:08.274: INFO: Waiting for pod pod-subpath-test-configmap-2tc2 to disappear +Jun 4 16:05:08.280: INFO: Pod pod-subpath-test-configmap-2tc2 no longer exists +STEP: Deleting pod pod-subpath-test-configmap-2tc2 +Jun 4 16:05:08.280: INFO: Deleting pod "pod-subpath-test-configmap-2tc2" in namespace "subpath-5366" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:05:08.285: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-5366" for this suite. +Jun 4 16:05:14.403: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:05:14.990: INFO: namespace subpath-5366 deletion completed in 6.663668109s + +• [SLOW TEST:29.476 seconds] +[sig-storage] Subpath +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:05:14.990: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with secret that has name projected-secret-test-8a366501-86e2-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 16:05:15.133: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac" in namespace "projected-3285" to be "success or failure" +Jun 4 16:05:15.139: INFO: Pod "pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. 
Elapsed: 5.991333ms +Jun 4 16:05:17.146: INFO: Pod "pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013258434s +Jun 4 16:05:19.272: INFO: Pod "pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.139284727s +STEP: Saw pod success +Jun 4 16:05:19.272: INFO: Pod "pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:05:19.283: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac container projected-secret-volume-test: +STEP: delete the pod +Jun 4 16:05:19.489: INFO: Waiting for pod pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:05:19.494: INFO: Pod pod-projected-secrets-8a37f93c-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:05:19.494: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3285" for this suite. +Jun 4 16:05:25.515: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:05:26.033: INFO: namespace projected-3285 deletion completed in 6.534208145s + +• [SLOW TEST:11.043 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:05:26.033: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. 
+Jun 4 16:05:26.397: INFO: Number of nodes with available pods: 0 +Jun 4 16:05:26.397: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:27.501: INFO: Number of nodes with available pods: 0 +Jun 4 16:05:27.501: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:28.485: INFO: Number of nodes with available pods: 0 +Jun 4 16:05:28.485: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:29.410: INFO: Number of nodes with available pods: 3 +Jun 4 16:05:29.410: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. +Jun 4 16:05:29.477: INFO: Number of nodes with available pods: 2 +Jun 4 16:05:29.477: INFO: Node ip-172-31-9-162.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:30.490: INFO: Number of nodes with available pods: 2 +Jun 4 16:05:30.490: INFO: Node ip-172-31-9-162.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:31.488: INFO: Number of nodes with available pods: 2 +Jun 4 16:05:31.488: INFO: Node ip-172-31-9-162.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:05:32.608: INFO: Number of nodes with available pods: 3 +Jun 4 16:05:32.609: INFO: Number of running nodes: 3, number of available pods: 3 +STEP: Wait for the failed daemon pod to be completely deleted. +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-5342, will wait for the garbage collector to delete the pods +Jun 4 16:05:32.682: INFO: Deleting DaemonSet.extensions daemon-set took: 9.618517ms +Jun 4 16:05:33.082: INFO: Terminating DaemonSet.extensions daemon-set pods took: 400.174914ms +Jun 4 16:05:46.088: INFO: Number of nodes with available pods: 0 +Jun 4 16:05:46.088: INFO: Number of running nodes: 0, number of available pods: 0 +Jun 4 16:05:46.094: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-5342/daemonsets","resourceVersion":"13964"},"items":null} + +Jun 4 16:05:46.101: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-5342/pods","resourceVersion":"13964"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:05:46.132: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-5342" for this suite. 
+Jun 4 16:05:52.175: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:05:52.481: INFO: namespace daemonsets-5342 deletion completed in 6.343680597s + +• [SLOW TEST:26.447 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:05:52.481: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with secret that has name projected-secret-test-map-a0858ab2-86e2-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 16:05:52.587: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac" in namespace "projected-6286" to be "success or failure" +Jun 4 16:05:52.594: INFO: Pod "pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.481057ms +Jun 4 16:05:54.602: INFO: Pod "pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014442919s +Jun 4 16:05:56.608: INFO: Pod "pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020452719s +STEP: Saw pod success +Jun 4 16:05:56.608: INFO: Pod "pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:05:56.624: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac container projected-secret-volume-test: +STEP: delete the pod +Jun 4 16:05:56.667: INFO: Waiting for pod pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:05:56.671: INFO: Pod pod-projected-secrets-a08b9f2f-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:05:56.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-6286" for this suite. 
+Jun 4 16:06:02.702: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:03.133: INFO: namespace projected-6286 deletion completed in 6.456882173s + +• [SLOW TEST:10.652 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-scheduling] SchedulerPredicates [Serial] + validates that NodeSelector is respected if not matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:03.133: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename sched-pred +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79 +Jun 4 16:06:03.183: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready +Jun 4 16:06:03.198: INFO: Waiting for terminating namespaces to be deleted... 
+Jun 4 16:06:03.205: INFO: +Logging pods the kubelet thinks is on node ip-172-31-11-48.eu-central-1.compute.internal before test +Jun 4 16:06:03.402: INFO: coredns-568fd445fd-q5bsd from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container coredns ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: kube-proxy-8f464 from kube-system started at 2019-06-04 14:59:19 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: node-local-dns-bqd4m from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container node-cache ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: canal-dqcxs from kube-system started at 2019-06-04 14:59:19 +0000 UTC (3 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container calico-node ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: Container install-cni ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: coredns-568fd445fd-l7bhx from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container coredns ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-tmnxg from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: Container systemd-logs ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: node-exporter-fm98z from kube-system started at 2019-06-04 14:59:19 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: Container node-exporter ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: openvpn-client-5bbcf59684-r2rls from kube-system started at 2019-06-04 14:59:59 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container dnat-controller ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: Container openvpn-client ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: kubernetes-dashboard-57dcd9448b-pcpsp from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.402: INFO: Container kubernetes-dashboard ready: true, restart count 0 +Jun 4 16:06:03.402: INFO: +Logging pods the kubelet thinks is on node ip-172-31-9-156.eu-central-1.compute.internal before test +Jun 4 16:06:03.529: INFO: kube-proxy-zvrkb from kube-system started at 2019-06-04 14:59:24 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.529: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-04 15:54:23 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.529: INFO: Container kube-sonobuoy ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: node-exporter-2bq9l from kube-system started at 2019-06-04 14:59:24 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.529: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: Container node-exporter ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: canal-5xshg from kube-system started at 2019-06-04 14:59:24 +0000 UTC (3 container statuses recorded) +Jun 4 16:06:03.529: INFO: 
Container calico-node ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: Container install-cni ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: node-local-dns-t84xd from kube-system started at 2019-06-04 15:00:24 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.529: INFO: Container node-cache ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-bmnlh from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.529: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: Container systemd-logs ready: true, restart count 0 +Jun 4 16:06:03.529: INFO: +Logging pods the kubelet thinks is on node ip-172-31-9-162.eu-central-1.compute.internal before test +Jun 4 16:06:03.703: INFO: node-local-dns-wslm4 from kube-system started at 2019-06-04 15:00:11 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container node-cache ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-psdr6 from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: Container systemd-logs ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: kube-proxy-htwg4 from kube-system started at 2019-06-04 14:59:30 +0000 UTC (1 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container kube-proxy ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: canal-6zg8m from kube-system started at 2019-06-04 14:59:30 +0000 UTC (3 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container calico-node ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: Container install-cni ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: Container kube-flannel ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: sonobuoy-e2e-job-eb1ef483a117445f from heptio-sonobuoy started at 2019-06-04 15:54:24 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container e2e ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: Container sonobuoy-worker ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: node-exporter-gkmxz from kube-system started at 2019-06-04 14:59:30 +0000 UTC (2 container statuses recorded) +Jun 4 16:06:03.703: INFO: Container kube-rbac-proxy ready: true, restart count 0 +Jun 4 16:06:03.703: INFO: Container node-exporter ready: true, restart count 0 +[It] validates that NodeSelector is respected if not matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Trying to schedule Pod with nonempty NodeSelector. +STEP: Considering event: +Type = [Warning], Name = [restricted-pod.15a50a79afd5f7cd], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 node(s) didn't match node selector.] +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:06:04.740: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "sched-pred-7719" for this suite. 
+Jun 4 16:06:10.792: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:11.284: INFO: namespace sched-pred-7719 deletion completed in 6.538214276s +[AfterEach] [sig-scheduling] SchedulerPredicates [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70 + +• [SLOW TEST:8.151 seconds] +[sig-scheduling] SchedulerPredicates [Serial] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22 + validates that NodeSelector is respected if not matching [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSS +------------------------------ +[k8s.io] Docker Containers + should use the image defaults if command and args are blank [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:11.285: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test use defaults +Jun 4 16:06:11.389: INFO: Waiting up to 5m0s for pod "client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac" in namespace "containers-2037" to be "success or failure" +Jun 4 16:06:11.398: INFO: Pod "client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.304266ms +Jun 4 16:06:13.403: INFO: Pod "client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013821123s +Jun 4 16:06:15.482: INFO: Pod "client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.092395804s +STEP: Saw pod success +Jun 4 16:06:15.482: INFO: Pod "client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:06:15.490: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac container test-container: +STEP: delete the pod +Jun 4 16:06:15.528: INFO: Waiting for pod client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:06:15.533: INFO: Pod client-containers-abc11c2f-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:06:15.533: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "containers-2037" for this suite. 
+Jun 4 16:06:21.582: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:22.135: INFO: namespace containers-2037 deletion completed in 6.597129929s + +• [SLOW TEST:10.850 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should use the image defaults if command and args are blank [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:22.135: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 4 16:06:22.264: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac" in namespace "projected-7905" to be "success or failure" +Jun 4 16:06:22.279: INFO: Pod "downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 15.371193ms +Jun 4 16:06:24.383: INFO: Pod "downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.119276746s +Jun 4 16:06:26.577: INFO: Pod "downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.312829438s +STEP: Saw pod success +Jun 4 16:06:26.577: INFO: Pod "downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:06:26.583: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac container client-container: +STEP: delete the pod +Jun 4 16:06:26.622: INFO: Waiting for pod downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:06:26.629: INFO: Pod downwardapi-volume-b23bda5b-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:06:26.630: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-7905" for this suite. 
+Jun 4 16:06:32.654: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:33.134: INFO: namespace projected-7905 deletion completed in 6.49884496s + +• [SLOW TEST:10.999 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:33.135: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test downward API volume plugin +Jun 4 16:06:33.194: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-6026" to be "success or failure" +Jun 4 16:06:33.199: INFO: Pod "downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.380515ms +Jun 4 16:06:35.283: INFO: Pod "downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.089357934s +Jun 4 16:06:37.289: INFO: Pod "downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.095450803s +STEP: Saw pod success +Jun 4 16:06:37.289: INFO: Pod "downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:06:37.294: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac container client-container: +STEP: delete the pod +Jun 4 16:06:37.494: INFO: Waiting for pod downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:06:37.500: INFO: Pod downwardapi-volume-b8bffcea-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:06:37.501: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "downward-api-6026" for this suite. +Jun 4 16:06:43.537: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:44.078: INFO: namespace downward-api-6026 deletion completed in 6.563491124s + +• [SLOW TEST:10.943 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:44.078: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test emptydir 0644 on node default medium +Jun 4 16:06:44.190: INFO: Waiting up to 5m0s for pod "pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-8083" to be "success or failure" +Jun 4 16:06:44.195: INFO: Pod "pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.705023ms +Jun 4 16:06:46.202: INFO: Pod "pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011624518s +Jun 4 16:06:48.208: INFO: Pod "pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.017949991s +STEP: Saw pod success +Jun 4 16:06:48.209: INFO: Pod "pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:06:48.214: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac container test-container: +STEP: delete the pod +Jun 4 16:06:48.250: INFO: Waiting for pod pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:06:48.255: INFO: Pod pod-bf4e50ca-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:06:48.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-8083" for this suite. +Jun 4 16:06:54.278: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:06:54.899: INFO: namespace emptydir-8083 deletion completed in 6.638713954s + +• [SLOW TEST:10.821 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:06:54.900: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265 +[It] should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the initial replication controller +Jun 4 16:06:55.036: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-2371' +Jun 4 16:06:55.813: INFO: stderr: "" +Jun 4 16:06:55.813: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. 
+Jun 4 16:06:55.813: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2371' +Jun 4 16:06:55.924: INFO: stderr: "" +Jun 4 16:06:55.925: INFO: stdout: "update-demo-nautilus-dl2sh update-demo-nautilus-spvj7 " +Jun 4 16:06:55.925: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-dl2sh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:06:56.207: INFO: stderr: "" +Jun 4 16:06:56.207: INFO: stdout: "" +Jun 4 16:06:56.207: INFO: update-demo-nautilus-dl2sh is created but not running +Jun 4 16:07:01.207: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2371' +Jun 4 16:07:01.384: INFO: stderr: "" +Jun 4 16:07:01.384: INFO: stdout: "update-demo-nautilus-dl2sh update-demo-nautilus-spvj7 " +Jun 4 16:07:01.384: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-dl2sh -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:01.492: INFO: stderr: "" +Jun 4 16:07:01.492: INFO: stdout: "true" +Jun 4 16:07:01.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-dl2sh -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:01.687: INFO: stderr: "" +Jun 4 16:07:01.687: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 4 16:07:01.687: INFO: validating pod update-demo-nautilus-dl2sh +Jun 4 16:07:01.889: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 4 16:07:01.889: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +Jun 4 16:07:01.889: INFO: update-demo-nautilus-dl2sh is verified up and running +Jun 4 16:07:01.889: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-spvj7 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:02.150: INFO: stderr: "" +Jun 4 16:07:02.150: INFO: stdout: "true" +Jun 4 16:07:02.150: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-spvj7 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:02.425: INFO: stderr: "" +Jun 4 16:07:02.425: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +Jun 4 16:07:02.425: INFO: validating pod update-demo-nautilus-spvj7 +Jun 4 16:07:02.606: INFO: got data: { + "image": "nautilus.jpg" +} + +Jun 4 16:07:02.606: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . 
+Jun 4 16:07:02.606: INFO: update-demo-nautilus-spvj7 is verified up and running +STEP: rolling-update to new replication controller +Jun 4 16:07:02.607: INFO: scanned /root for discovery docs: +Jun 4 16:07:02.607: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=kubectl-2371' +Jun 4 16:07:25.798: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n" +Jun 4 16:07:25.798: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n" +STEP: waiting for all containers in name=update-demo pods to come up. +Jun 4 16:07:25.798: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-2371' +Jun 4 16:07:25.974: INFO: stderr: "" +Jun 4 16:07:25.974: INFO: stdout: "update-demo-kitten-mbvld update-demo-kitten-z2c4d " +Jun 4 16:07:25.974: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-kitten-mbvld -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:26.056: INFO: stderr: "" +Jun 4 16:07:26.056: INFO: stdout: "true" +Jun 4 16:07:26.056: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-kitten-mbvld -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:26.147: INFO: stderr: "" +Jun 4 16:07:26.147: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 4 16:07:26.147: INFO: validating pod update-demo-kitten-mbvld +Jun 4 16:07:26.468: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 4 16:07:26.468: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 4 16:07:26.468: INFO: update-demo-kitten-mbvld is verified up and running +Jun 4 16:07:26.468: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-kitten-z2c4d -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:26.683: INFO: stderr: "" +Jun 4 16:07:26.683: INFO: stdout: "true" +Jun 4 16:07:26.683: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-kitten-z2c4d -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-2371' +Jun 4 16:07:26.769: INFO: stderr: "" +Jun 4 16:07:26.769: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0" +Jun 4 16:07:26.769: INFO: validating pod update-demo-kitten-z2c4d +Jun 4 16:07:26.916: INFO: got data: { + "image": "kitten.jpg" +} + +Jun 4 16:07:26.916: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg . +Jun 4 16:07:26.916: INFO: update-demo-kitten-z2c4d is verified up and running +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:07:26.916: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2371" for this suite. +Jun 4 16:07:48.943: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:07:49.291: INFO: namespace kubectl-2371 deletion completed in 22.368436206s + +• [SLOW TEST:54.391 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Update Demo + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should do a rolling update of a replication controller [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-storage] Projected configMap + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:07:49.291: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name projected-configmap-test-volume-e63a8035-86e2-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 16:07:49.504: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac" in namespace "projected-499" to be "success or failure" +Jun 4 16:07:49.510: INFO: Pod "pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.299344ms +Jun 4 16:07:51.516: INFO: Pod "pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011306517s +Jun 4 16:07:53.588: INFO: Pod "pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.084120351s +STEP: Saw pod success +Jun 4 16:07:53.588: INFO: Pod "pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:07:53.594: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test: +STEP: delete the pod +Jun 4 16:07:53.688: INFO: Waiting for pod pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:07:53.692: INFO: Pod pod-projected-configmaps-e63bc0e1-86e2-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:07:53.692: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-499" for this suite. +Jun 4 16:07:59.715: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:07:59.948: INFO: namespace projected-499 deletion completed in 6.25042801s + +• [SLOW TEST:10.657 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33 + should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl version + should check is all data is printed [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:07:59.949: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should check is all data is printed [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 4 16:07:59.992: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 version' +Jun 4 16:08:00.085: INFO: stderr: "" +Jun 4 16:08:00.085: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.1\", GitCommit:\"b7394102d6ef778017f2ca4046abbaa23b88c290\", GitTreeState:\"clean\", BuildDate:\"2019-04-08T17:11:31Z\", GoVersion:\"go1.12.1\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.1\", GitCommit:\"b7394102d6ef778017f2ca4046abbaa23b88c290\", GitTreeState:\"clean\", BuildDate:\"2019-04-08T17:02:58Z\", GoVersion:\"go1.12.1\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +[AfterEach] [sig-cli] 
Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:08:00.085: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-9008" for this suite. +Jun 4 16:08:06.108: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:08:06.412: INFO: namespace kubectl-9008 deletion completed in 6.32148233s + +• [SLOW TEST:6.463 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl version + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should check is all data is printed [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should adopt matching pods on creation [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:08:06.412: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[It] should adopt matching pods on creation [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Given a Pod with a 'name' label pod-adoption is created +STEP: When a replication controller with a matching selector is created +STEP: Then the orphan pod is adopted +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:08:11.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-3299" for this suite. 
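+
+The adoption flow exercised above is driven purely by labels and ownerReferences: a bare pod whose labels match a ReplicationController's selector gets adopted by that controller. A minimal sketch for reproducing the check by hand, with illustrative names and image (this is not the test's own manifest):
+
+```
+$ kubectl run pod-adoption --restart=Never --labels=name=pod-adoption \
+    --image=docker.io/library/nginx:1.14-alpine          # bare pod, no owner yet
+$ cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: pod-adoption
+spec:
+  replicas: 1
+  selector:
+    name: pod-adoption                                   # matches the orphan pod's label
+  template:
+    metadata:
+      labels:
+        name: pod-adoption
+    spec:
+      containers:
+      - name: nginx
+        image: docker.io/library/nginx:1.14-alpine
+EOF
+$ kubectl get pod pod-adoption \
+    -o jsonpath='{.metadata.ownerReferences[0].kind}/{.metadata.ownerReferences[0].name}'
+# expected once adopted: ReplicationController/pod-adoption
+```
+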
+Jun 4 16:08:33.817: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:08:34.417: INFO: namespace replication-controller-3299 deletion completed in 22.632763524s + +• [SLOW TEST:28.005 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should adopt matching pods on creation [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:08:34.417: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-010dbeaa-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 16:08:34.511: INFO: Waiting up to 5m0s for pod "pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac" in namespace "configmap-836" to be "success or failure" +Jun 4 16:08:34.519: INFO: Pod "pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.748426ms +Jun 4 16:08:36.535: INFO: Pod "pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023928661s +Jun 4 16:08:38.589: INFO: Pod "pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.077871456s +STEP: Saw pod success +Jun 4 16:08:38.589: INFO: Pod "pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:08:38.595: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac container configmap-volume-test: +STEP: delete the pod +Jun 4 16:08:38.785: INFO: Waiting for pod pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:08:38.790: INFO: Pod pod-configmaps-010ebff1-86e3-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:08:38.790: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-836" for this suite. 
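+
+The defaultMode assertion above maps one-to-one onto the configMap volume API. A sketch of the same check under assumed names; stat is called with -L because the atomic writer exposes each key through a symlink:
+
+```
+$ kubectl create configmap demo-config --from-literal=data-1=value-1
+$ cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-mode-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test
+    image: busybox
+    command: ["stat", "-Lc", "%a", "/etc/configmap-volume/data-1"]
+    volumeMounts:
+    - name: configmap-volume
+      mountPath: /etc/configmap-volume
+  volumes:
+  - name: configmap-volume
+    configMap:
+      name: demo-config
+      defaultMode: 0400        # the mode the mounted file should report
+EOF
+$ kubectl logs configmap-mode-demo     # prints: 400
+```
+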
+Jun 4 16:08:44.821: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:08:45.504: INFO: namespace configmap-836 deletion completed in 6.702249199s + +• [SLOW TEST:11.087 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:08:45.504: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with secret that has name projected-secret-test-07a4b9b1-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 16:08:45.562: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac" in namespace "projected-5226" to be "success or failure" +Jun 4 16:08:45.567: INFO: Pod "pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.567109ms +Jun 4 16:08:47.586: INFO: Pod "pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02360274s +Jun 4 16:08:49.591: INFO: Pod "pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029250995s +STEP: Saw pod success +Jun 4 16:08:49.591: INFO: Pod "pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:08:49.596: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac container projected-secret-volume-test: +STEP: delete the pod +Jun 4 16:08:49.887: INFO: Waiting for pod pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:08:49.986: INFO: Pod pod-projected-secrets-07a61616-86e3-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:08:49.986: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-5226" for this suite. 
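+
+The non-root variant above combines a projected secret volume with a pod-level securityContext: the kubelet applies defaultMode to each projected file and sets its group to fsGroup, which is what lets uid 1000 read it. An illustrative manifest, not the test's own:
+
+```
+$ kubectl create secret generic projected-demo --from-literal=data-1=value-1
+$ cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-secret-demo
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 1000            # non-root
+    fsGroup: 1001              # group ownership applied to projected files
+  containers:
+  - name: test
+    image: busybox
+    command: ["ls", "-lnL", "/etc/projected-volume"]
+    volumeMounts:
+    - name: projected-volume
+      mountPath: /etc/projected-volume
+  volumes:
+  - name: projected-volume
+    projected:
+      defaultMode: 0440        # group-readable, so gid 1001 can read data-1
+      sources:
+      - secret:
+          name: projected-demo
+EOF
+$ kubectl logs projected-secret-demo   # expect mode 440, group 1001 on data-1
+```
+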
+Jun 4 16:08:56.196: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:08:56.542: INFO: namespace projected-5226 deletion completed in 6.55031776s + +• [SLOW TEST:11.038 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:08:56.544: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating replication controller my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac +Jun 4 16:08:56.606: INFO: Pod name my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac: Found 1 pods out of 1 +Jun 4 16:08:56.606: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac" are running +Jun 4 16:09:00.620: INFO: Pod "my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac-l7x9j" is running (conditions: [{Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-04 16:08:56 +0000 UTC Reason: Message:}]) +Jun 4 16:09:00.620: INFO: Trying to dial the pod +Jun 4 16:09:05.887: INFO: Controller my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac: Got expected result from replica 1 [my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac-l7x9j]: "my-hostname-basic-0e38c44e-86e3-11e9-a2b6-96b18e3e6fac-l7x9j", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:09:05.887: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-8739" for this suite. 
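+
+Per-replica validation like the above boils down to asking every pod behind the controller for its own identity over the apiserver's pod proxy. A rough equivalent, assuming the default namespace and a hostname-echoing image on port 9376 (the image name is an assumption, not taken from this log):
+
+```
+$ kubectl run my-hostname-basic --generator=run/v1 --replicas=2 --port=9376 \
+    --image=gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1
+$ for p in $(kubectl get pods -l run=my-hostname-basic -o jsonpath='{.items[*].metadata.name}'); do
+    echo -n "$p => "
+    kubectl get --raw "/api/v1/namespaces/default/pods/${p}:9376/proxy/"; echo
+  done
+# every replica should answer with its own pod name
+```
+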
+Jun 4 16:09:11.916: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:09:12.246: INFO: namespace replication-controller-8739 deletion completed in 6.352017269s + +• [SLOW TEST:15.702 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for services [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:09:12.246: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for services [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-6242.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-6242.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.test-service-2.dns-6242.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-6242.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 47.10.10.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.10.10.47_udp@PTR;check="$$(dig +tcp +noall +answer +search 47.10.10.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.10.10.47_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service.dns-6242.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search dns-test-service.dns-6242.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.dns-6242.svc.cluster.local;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.dns-6242.svc.cluster.local SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.dns-6242.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-6242.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 47.10.10.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.10.10.47_udp@PTR;check="$$(dig +tcp +noall +answer +search 47.10.10.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.10.10.47_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Jun 4 16:09:26.669: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6242.svc.cluster.local from pod dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac: the server could not find the requested resource (get pods dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac) +Jun 4 16:09:26.689: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local from pod dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac: the server could not find the requested resource (get pods dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac) +Jun 4 16:09:26.726: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local from pod dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac: the server could not find the requested resource (get pods dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac) +Jun 4 16:09:27.389: INFO: Unable to read jessie_udp@dns-test-service.dns-6242.svc.cluster.local from pod dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac: the server could not find the requested resource (get pods dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac) +Jun 4 16:09:28.166: INFO: Lookups using dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac failed for: [wheezy_tcp@dns-test-service.dns-6242.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6242.svc.cluster.local jessie_udp@dns-test-service.dns-6242.svc.cluster.local] + +Jun 4 16:09:35.026: INFO: DNS probes using dns-6242/dns-test-17a7d883-86e3-11e9-a2b6-96b18e3e6fac succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:09:35.118: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-6242" for this suite. 
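+
+The wheezy/jessie probe scripts above are dig loops over the service's A, SRV, and PTR records. The same records can be spot-checked from any pod that ships dig; the image below is an assumption, and since dns-6242 is deleted during teardown you would substitute a live service and namespace:
+
+```
+$ kubectl run dns-check --restart=Never --image=tutum/dnsutils -- sleep 3600
+$ kubectl exec dns-check -- dig +short dns-test-service.dns-6242.svc.cluster.local A
+$ kubectl exec dns-check -- dig +short _http._tcp.dns-test-service.dns-6242.svc.cluster.local SRV
+$ kubectl exec dns-check -- dig +short -x 10.10.10.47   # reverse (PTR) lookup of the ClusterIP
+```
+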
+Jun 4 16:09:41.391: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:09:42.007: INFO: namespace dns-6242 deletion completed in 6.877155739s + +• [SLOW TEST:29.761 seconds] +[sig-network] DNS +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for services [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Namespaces [Serial] + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Namespaces [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:09:42.007: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename namespaces +STEP: Waiting for a default service account to be provisioned in namespace +[It] should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a test namespace +STEP: Waiting for a default service account to be provisioned in namespace +STEP: Creating a service in the namespace +STEP: Deleting the namespace +STEP: Waiting for the namespace to be removed. +STEP: Recreating the namespace +STEP: Verifying there is no service in the namespace +[AfterEach] [sig-api-machinery] Namespaces [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:09:48.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "namespaces-654" for this suite. +Jun 4 16:09:54.586: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:09:55.600: INFO: namespace namespaces-654 deletion completed in 7.131675156s +STEP: Destroying namespace "nsdeletetest-7730" for this suite. +Jun 4 16:09:55.612: INFO: Namespace nsdeletetest-7730 was already deleted +STEP: Destroying namespace "nsdeletetest-5188" for this suite. 
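+
+What the namespace test asserts is ordinary cascade behavior: deleting a namespace removes every object inside it, and a recreated namespace of the same name starts empty. A quick manual equivalent with throwaway names:
+
+```
+$ kubectl create namespace nsdelete-demo
+$ kubectl -n nsdelete-demo create service clusterip test-service --tcp=80:80
+$ kubectl delete namespace nsdelete-demo --wait=true
+$ kubectl create namespace nsdelete-demo        # recreate under the same name
+$ kubectl -n nsdelete-demo get services         # expect: No resources found.
+```
+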
+Jun 4 16:10:01.730: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:10:02.283: INFO: namespace nsdeletetest-5188 deletion completed in 6.671543444s + +• [SLOW TEST:20.276 seconds] +[sig-api-machinery] Namespaces [Serial] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should ensure that all services are removed when a namespace is deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSS +------------------------------ +[sig-api-machinery] Garbage collector + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:10:02.283: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the rc1 +STEP: create the rc2 +STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well +STEP: delete the rc simpletest-rc-to-be-deleted +STEP: wait for the rc to be deleted +STEP: Gathering metrics +W0604 16:10:13.250017 15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +Jun 4 16:10:13.250: INFO: For apiserver_request_total: +For apiserver_request_latencies_summary: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:10:13.250: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-6546" for this suite. 
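+
+The dual-owner scenario above is visible directly on the pods: each carries two ownerReferences, and the garbage collector only deletes a dependent once no valid owner remains. A hedged sketch for inspecting that state and issuing the same foreground deletion through the raw API (controller names are from the log; the proxy/cURL route is one way to set propagationPolicy with a v1.14-era kubectl):
+
+```
+$ kubectl get pod <pod-name> \
+    -o jsonpath='{range .metadata.ownerReferences[*]}{.kind}/{.name}{"\n"}{end}'
+# expect both simpletest-rc-to-be-deleted and simpletest-rc-to-stay listed
+$ kubectl proxy --port=8001 &
+$ curl -X DELETE -H 'Content-Type: application/json' \
+    -d '{"kind":"DeleteOptions","apiVersion":"v1","propagationPolicy":"Foreground"}' \
+    'http://127.0.0.1:8001/api/v1/namespaces/<ns>/replicationcontrollers/simpletest-rc-to-be-deleted'
+# pods that still have a valid owner survive; only sole dependents are collected
+```
+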
+Jun 4 16:10:21.481: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:10:22.335: INFO: namespace gc-6546 deletion completed in 9.001532942s + +• [SLOW TEST:20.051 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:10:22.335: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod pod-subpath-test-secret-57tk +STEP: Creating a pod to test atomic-volume-subpath +Jun 4 16:10:22.509: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-57tk" in namespace "subpath-9311" to be "success or failure" +Jun 4 16:10:22.538: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Pending", Reason="", readiness=false. Elapsed: 28.627572ms +Jun 4 16:10:24.543: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Pending", Reason="", readiness=false. Elapsed: 2.033692729s +Jun 4 16:10:26.554: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 4.045031599s +Jun 4 16:10:28.561: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 6.051646552s +Jun 4 16:10:30.681: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 8.171556268s +Jun 4 16:10:32.687: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 10.177577789s +Jun 4 16:10:34.693: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 12.183859634s +Jun 4 16:10:36.700: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 14.190576891s +Jun 4 16:10:38.714: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 16.205000187s +Jun 4 16:10:40.720: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 18.210363105s +Jun 4 16:10:42.781: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. 
Elapsed: 20.272083938s +Jun 4 16:10:44.883: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Running", Reason="", readiness=true. Elapsed: 22.373684113s +Jun 4 16:10:47.278: INFO: Pod "pod-subpath-test-secret-57tk": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.768700836s +STEP: Saw pod success +Jun 4 16:10:47.278: INFO: Pod "pod-subpath-test-secret-57tk" satisfied condition "success or failure" +Jun 4 16:10:47.289: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-subpath-test-secret-57tk container test-container-subpath-secret-57tk: +STEP: delete the pod +Jun 4 16:10:47.377: INFO: Waiting for pod pod-subpath-test-secret-57tk to disappear +Jun 4 16:10:47.382: INFO: Pod pod-subpath-test-secret-57tk no longer exists +STEP: Deleting pod pod-subpath-test-secret-57tk +Jun 4 16:10:47.382: INFO: Deleting pod "pod-subpath-test-secret-57tk" in namespace "subpath-9311" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:10:47.387: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "subpath-9311" for this suite. +Jun 4 16:10:53.496: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:10:54.411: INFO: namespace subpath-9311 deletion completed in 7.017036892s + +• [SLOW TEST:32.076 seconds] +[sig-storage] Subpath +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with secret pod [LinuxOnly] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:10:54.411: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating projection with secret that has name projected-secret-test-547c3006-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume secrets +Jun 4 16:10:54.495: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac" in namespace "projected-2611" to be "success or failure" +Jun 4 16:10:54.500: INFO: Pod 
"pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.826466ms +Jun 4 16:10:56.507: INFO: Pod "pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012020594s +Jun 4 16:10:58.513: INFO: Pod "pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018208925s +STEP: Saw pod success +Jun 4 16:10:58.513: INFO: Pod "pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:10:58.519: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac container projected-secret-volume-test: +STEP: delete the pod +Jun 4 16:10:58.812: INFO: Waiting for pod pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:10:58.873: INFO: Pod pod-projected-secrets-547ec963-86e3-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:10:58.873: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-2611" for this suite. +Jun 4 16:11:04.906: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:11:05.535: INFO: namespace projected-2611 deletion completed in 6.653814723s + +• [SLOW TEST:11.124 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33 + should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Probing container + should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:11:05.535: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51 +[It] should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating pod liveness-http in namespace container-probe-4715 +Jun 4 16:11:11.904: INFO: Started pod liveness-http in namespace container-probe-4715 +STEP: checking the pod's current state and verifying that restartCount is present +Jun 4 
16:11:11.911: INFO: Initial restart count of pod liveness-http is 0 +Jun 4 16:11:24.074: INFO: Restart count of pod container-probe-4715/liveness-http is now 1 (12.162904247s elapsed) +Jun 4 16:11:44.473: INFO: Restart count of pod container-probe-4715/liveness-http is now 2 (32.562365404s elapsed) +Jun 4 16:12:04.783: INFO: Restart count of pod container-probe-4715/liveness-http is now 3 (52.87181966s elapsed) +Jun 4 16:12:25.010: INFO: Restart count of pod container-probe-4715/liveness-http is now 4 (1m13.098548581s elapsed) +Jun 4 16:13:35.975: INFO: Restart count of pod container-probe-4715/liveness-http is now 5 (2m24.064378382s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:13:36.000: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-probe-4715" for this suite. +Jun 4 16:13:42.031: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:13:42.482: INFO: namespace container-probe-4715 deletion completed in 6.475698195s + +• [SLOW TEST:156.947 seconds] +[k8s.io] Probing container +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should have monotonically increasing restart count [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[k8s.io] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:13:42.483: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +Jun 4 16:13:47.164: INFO: Successfully updated pod "pod-update-activedeadlineseconds-b8acc13e-86e3-11e9-a2b6-96b18e3e6fac" +Jun 4 16:13:47.164: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-b8acc13e-86e3-11e9-a2b6-96b18e3e6fac" in namespace "pods-377" to be "terminated due to deadline exceeded" +Jun 4 16:13:47.251: INFO: Pod "pod-update-activedeadlineseconds-b8acc13e-86e3-11e9-a2b6-96b18e3e6fac": Phase="Running", Reason="", readiness=true. 
Elapsed: 87.107634ms +Jun 4 16:13:49.262: INFO: Pod "pod-update-activedeadlineseconds-b8acc13e-86e3-11e9-a2b6-96b18e3e6fac": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 2.097867713s +Jun 4 16:13:49.262: INFO: Pod "pod-update-activedeadlineseconds-b8acc13e-86e3-11e9-a2b6-96b18e3e6fac" satisfied condition "terminated due to deadline exceeded" +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:13:49.262: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-377" for this suite. +Jun 4 16:13:55.288: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:13:55.881: INFO: namespace pods-377 deletion completed in 6.61190481s + +• [SLOW TEST:13.398 seconds] +[k8s.io] Pods +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +S +------------------------------ +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:13:55.881: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name cm-test-opt-del-c0b0b473-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating configMap with name cm-test-opt-upd-c0b0b4b2-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-c0b0b473-86e3-11e9-a2b6-96b18e3e6fac +STEP: Updating configmap cm-test-opt-upd-c0b0b4b2-86e3-11e9-a2b6-96b18e3e6fac +STEP: Creating configMap with name cm-test-opt-create-c0b0b4cb-86e3-11e9-a2b6-96b18e3e6fac +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:14:02.852: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-4421" for this suite. 
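+
+Optional configMap volumes tolerate the referenced configMap being absent, and the kubelet refreshes mounted keys on its sync loop (typically within a minute). A sketch that makes the update observable, with illustrative names:
+
+```
+$ kubectl create configmap cm-demo --from-literal=data-1=value-1
+$ cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cm-watch
+spec:
+  containers:
+  - name: watcher
+    image: busybox
+    command: ["sh", "-c", "while true; do cat /etc/cm/data-1 2>/dev/null; echo; sleep 5; done"]
+    volumeMounts:
+    - name: cm
+      mountPath: /etc/cm
+  volumes:
+  - name: cm
+    configMap:
+      name: cm-demo
+      optional: true           # pod still starts even if cm-demo is deleted
+EOF
+$ kubectl create configmap cm-demo --from-literal=data-1=value-2 \
+    --dry-run -o yaml | kubectl replace -f -
+$ kubectl logs -f cm-watch     # value-1 ... then value-2 once the kubelet syncs the volume
+```
+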
+Jun 4 16:14:24.958: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:14:25.562: INFO: namespace configmap-4421 deletion completed in 22.703575612s + +• [SLOW TEST:29.681 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:14:25.562: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should run and stop complex daemon [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 4 16:14:25.781: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +Jun 4 16:14:25.860: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:25.860: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. 
+Jun 4 16:14:25.895: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:25.895: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:26.901: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:26.901: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:27.926: INFO: Number of nodes with available pods: 1 +Jun 4 16:14:27.926: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update the node label to green, and wait for daemons to be unscheduled +Jun 4 16:14:27.952: INFO: Number of nodes with available pods: 1 +Jun 4 16:14:27.952: INFO: Number of running nodes: 0, number of available pods: 1 +Jun 4 16:14:28.958: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:28.958: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate +Jun 4 16:14:29.059: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:29.059: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:30.064: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:30.064: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:31.066: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:31.066: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:32.147: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:32.148: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:33.252: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:33.252: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod +Jun 4 16:14:34.150: INFO: Number of nodes with available pods: 1 +Jun 4 16:14:34.151: INFO: Number of running nodes: 1, number of available pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-7951, will wait for the garbage collector to delete the pods +Jun 4 16:14:34.226: INFO: Deleting DaemonSet.extensions daemon-set took: 13.016706ms +Jun 4 16:14:34.726: INFO: Terminating DaemonSet.extensions daemon-set pods took: 500.312455ms +Jun 4 16:14:38.331: INFO: Number of nodes with available pods: 0 +Jun 4 16:14:38.331: INFO: Number of running nodes: 0, number of available pods: 0 +Jun 4 16:14:38.335: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-7951/daemonsets","resourceVersion":"16281"},"items":null} + +Jun 4 16:14:38.339: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-7951/pods","resourceVersion":"16281"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:14:38.373: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "daemonsets-7951" for this suite. 
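+
+The blue/green dance above is plain nodeSelector scheduling: relabeling a node into or out of the selector launches or evicts the daemon pod, and the strategy switch is a one-field patch. An illustrative DaemonSet, not the test's manifest:
+
+```
+$ cat <<EOF | kubectl apply -f -
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: daemon-set-demo
+spec:
+  selector:
+    matchLabels:
+      app: daemon-set-demo
+  template:
+    metadata:
+      labels:
+        app: daemon-set-demo
+    spec:
+      nodeSelector:
+        color: blue
+      containers:
+      - name: app
+        image: docker.io/library/nginx:1.14-alpine
+EOF
+$ kubectl label node <node-name> color=blue                 # daemon pod launches there
+$ kubectl label node <node-name> color=green --overwrite    # ...and is evicted again
+$ kubectl patch daemonset daemon-set-demo -p \
+    '{"spec":{"updateStrategy":{"type":"RollingUpdate"},"template":{"spec":{"nodeSelector":{"color":"green"}}}}}'
+```
+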
+Jun 4 16:14:44.448: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:14:44.877: INFO: namespace daemonsets-7951 deletion completed in 6.499060596s + +• [SLOW TEST:19.315 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should run and stop complex daemon [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run job + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:14:44.877: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run job + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1510 +[It] should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 4 16:14:44.949: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-6938' +Jun 4 16:14:45.144: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +Jun 4 16:14:45.144: INFO: stdout: "job.batch/e2e-test-nginx-job created\n" +STEP: verifying the job e2e-test-nginx-job was created +[AfterEach] [k8s.io] Kubectl run job + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1515 +Jun 4 16:14:45.209: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete jobs e2e-test-nginx-job --namespace=kubectl-6938' +Jun 4 16:14:45.497: INFO: stderr: "" +Jun 4 16:14:45.497: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:14:45.497: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-6938" for this suite. 
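+
+Since kubectl run --generator=job/v1 is already flagged as deprecated in the output above, the durable equivalent is a plain batch/v1 Job manifest with restartPolicy: OnFailure. A minimal sketch:
+
+```
+$ cat <<EOF | kubectl apply -f -
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: e2e-test-nginx-job
+spec:
+  template:
+    spec:
+      restartPolicy: OnFailure       # the property the test verifies
+      containers:
+      - name: e2e-test-nginx-job
+        image: docker.io/library/nginx:1.14-alpine
+EOF
+$ kubectl get job e2e-test-nginx-job \
+    -o jsonpath='{.spec.template.spec.restartPolicy}'   # OnFailure
+$ kubectl delete job e2e-test-nginx-job
+```
+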
+Jun 4 16:14:51.539: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:14:51.983: INFO: namespace kubectl-6938 deletion completed in 6.4714558s + +• [SLOW TEST:7.106 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run job + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create a job from an image when restart is OnFailure [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete RS created by deployment when not orphaning [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:14:51.983: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete RS created by deployment when not orphaning [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the deployment +STEP: Wait for the Deployment to create new ReplicaSet +STEP: delete the deployment +STEP: wait for all rs to be garbage collected +STEP: expected 0 rs, got 1 rs +STEP: expected 0 pods, got 2 pods +STEP: Gathering metrics +W0604 16:14:52.813808 15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. +Jun 4 16:14:52.813: INFO: For apiserver_request_total: +For apiserver_request_latencies_summary: +For apiserver_init_events_total: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:14:52.813: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-9855" for this suite. 
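+
+The assertion above is default background cascading deletion: the Deployment owns its ReplicaSet, the ReplicaSet owns its pods, so deleting the Deployment lets the garbage collector sweep both layers. Reproducible with any throwaway deployment:
+
+```
+$ kubectl create deployment nginx-demo --image=docker.io/library/nginx:1.14-alpine
+$ kubectl get rs -l app=nginx-demo          # one ReplicaSet, owned by the deployment
+$ kubectl delete deployment nginx-demo      # background propagation by default
+$ kubectl get rs,pods -l app=nginx-demo     # shortly after: No resources found.
+```
+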
+Jun 4 16:14:58.838: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:14:59.678: INFO: namespace gc-9855 deletion completed in 6.85855332s + +• [SLOW TEST:7.695 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should delete RS created by deployment when not orphaning [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Services + should serve multiport endpoints from pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-network] Services + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:14:59.680: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86 +[It] should serve multiport endpoints from pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: creating service multi-endpoint-test in namespace services-2761 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-2761 to expose endpoints map[] +Jun 4 16:14:59.766: INFO: successfully validated that service multi-endpoint-test in namespace services-2761 exposes endpoints map[] (7.441014ms elapsed) +STEP: Creating pod pod1 in namespace services-2761 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-2761 to expose endpoints map[pod1:[100]] +Jun 4 16:15:02.919: INFO: successfully validated that service multi-endpoint-test in namespace services-2761 exposes endpoints map[pod1:[100]] (3.139710478s elapsed) +STEP: Creating pod pod2 in namespace services-2761 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-2761 to expose endpoints map[pod1:[100] pod2:[101]] +Jun 4 16:15:05.085: INFO: successfully validated that service multi-endpoint-test in namespace services-2761 exposes endpoints map[pod1:[100] pod2:[101]] (2.154511535s elapsed) +STEP: Deleting pod pod1 in namespace services-2761 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-2761 to expose endpoints map[pod2:[101]] +Jun 4 16:15:06.143: INFO: successfully validated that service multi-endpoint-test in namespace services-2761 exposes endpoints map[pod2:[101]] (1.04772357s elapsed) +STEP: Deleting pod pod2 in namespace services-2761 +STEP: waiting up to 3m0s for service multi-endpoint-test in namespace services-2761 to expose endpoints map[] +Jun 4 16:15:06.159: INFO: successfully validated that service multi-endpoint-test in namespace services-2761 exposes endpoints map[] (5.510097ms elapsed) 
+[AfterEach] [sig-network] Services
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:15:06.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-2761" for this suite.
+Jun 4 16:15:28.210: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:15:28.653: INFO: namespace services-2761 deletion completed in 22.465179584s
+[AfterEach] [sig-network] Services
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:28.974 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+ should serve multiport endpoints from pods [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-storage] EmptyDir wrapper volumes
+ should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:15:28.653: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating 50 configmaps
+STEP: Creating RC which spawns configmap-volume pods
+Jun 4 16:15:29.672: INFO: Pod name wrapped-volume-race-f8811623-86e3-11e9-a2b6-96b18e3e6fac: Found 0 pods out of 5
+Jun 4 16:15:34.680: INFO: Pod name wrapped-volume-race-f8811623-86e3-11e9-a2b6-96b18e3e6fac: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-f8811623-86e3-11e9-a2b6-96b18e3e6fac in namespace emptydir-wrapper-9548, will wait for the garbage collector to delete the pods
+Jun 4 16:15:46.842: INFO: Deleting ReplicationController wrapped-volume-race-f8811623-86e3-11e9-a2b6-96b18e3e6fac took: 39.937773ms
+Jun 4 16:15:47.342: INFO: Terminating ReplicationController wrapped-volume-race-f8811623-86e3-11e9-a2b6-96b18e3e6fac pods took: 500.194136ms
+STEP: Creating RC which spawns configmap-volume pods
+Jun 4 16:16:26.370: INFO: Pod name wrapped-volume-race-1a4d046a-86e4-11e9-a2b6-96b18e3e6fac: Found 0 pods out of 5
+Jun 4 16:16:31.379: INFO: Pod name wrapped-volume-race-1a4d046a-86e4-11e9-a2b6-96b18e3e6fac: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-1a4d046a-86e4-11e9-a2b6-96b18e3e6fac in namespace emptydir-wrapper-9548, will wait for the garbage collector to delete the pods
+Jun 4 16:16:43.636: INFO: Deleting ReplicationController wrapped-volume-race-1a4d046a-86e4-11e9-a2b6-96b18e3e6fac took: 49.888375ms
+Jun 4 16:16:44.137: INFO: Terminating ReplicationController wrapped-volume-race-1a4d046a-86e4-11e9-a2b6-96b18e3e6fac pods took: 500.182828ms
+STEP: Creating RC which spawns configmap-volume pods
+Jun 4 16:17:20.677: INFO: Pod name wrapped-volume-race-3aa9a30d-86e4-11e9-a2b6-96b18e3e6fac: Found 0 pods out of 5
+Jun 4 16:17:25.686: INFO: Pod name wrapped-volume-race-3aa9a30d-86e4-11e9-a2b6-96b18e3e6fac: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-3aa9a30d-86e4-11e9-a2b6-96b18e3e6fac in namespace emptydir-wrapper-9548, will wait for the garbage collector to delete the pods
+Jun 4 16:17:39.805: INFO: Deleting ReplicationController wrapped-volume-race-3aa9a30d-86e4-11e9-a2b6-96b18e3e6fac took: 14.961889ms
+Jun 4 16:17:40.405: INFO: Terminating ReplicationController wrapped-volume-race-3aa9a30d-86e4-11e9-a2b6-96b18e3e6fac pods took: 600.264812ms
+STEP: Cleaning up the configMaps
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:18:27.675: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-9548" for this suite.
+Jun 4 16:18:35.731: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:18:36.255: INFO: namespace emptydir-wrapper-9548 deletion completed in 8.575088625s
+
+• [SLOW TEST:187.602 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+ should not cause race condition when used for configmaps [Serial] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected downwardAPI
+ should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:18:36.256: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun 4 16:18:36.349: INFO: Waiting up to 5m0s for pod "downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac" in namespace "projected-1645" to be "success or failure"
+Jun 4 16:18:36.357: INFO: Pod "downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.451296ms
+Jun 4 16:18:38.363: INFO: Pod "downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014091407s
+Jun 4 16:18:40.372: INFO: Pod "downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022875698s
+STEP: Saw pod success
+Jun 4 16:18:40.372: INFO: Pod "downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:18:40.377: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac container client-container:
+STEP: delete the pod
+Jun 4 16:18:40.483: INFO: Waiting for pod downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:18:40.488: INFO: Pod downwardapi-volume-67c8798f-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:18:40.488: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1645" for this suite.
+Jun 4 16:18:46.515: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:18:47.068: INFO: namespace projected-1645 deletion completed in 6.571767872s
+
+• [SLOW TEST:10.812 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+ should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance]
+ should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:18:47.068: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+Jun 4 16:18:47.244: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:18:50.715: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-5839" for this suite.
+Jun 4 16:18:56.740: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:18:57.337: INFO: namespace init-container-5839 deletion completed in 6.61708345s
+
+• [SLOW TEST:10.269 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+ should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[k8s.io] Variable Expansion
+ should allow substituting values in a container's command [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Variable Expansion
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:18:57.338: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's command [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test substitution in container's command
+Jun 4 16:18:57.449: INFO: Waiting up to 5m0s for pod "var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac" in namespace "var-expansion-4669" to be "success or failure"
+Jun 4 16:18:57.456: INFO: Pod "var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.237976ms
+Jun 4 16:18:59.461: INFO: Pod "var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012619176s
+Jun 4 16:19:01.467: INFO: Pod "var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018571324s
+STEP: Saw pod success
+Jun 4 16:19:01.467: INFO: Pod "var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:19:01.472: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac container dapi-container:
+STEP: delete the pod
+Jun 4 16:19:01.570: INFO: Waiting for pod var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:19:01.575: INFO: Pod var-expansion-745c2c88-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:19:01.575: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "var-expansion-4669" for this suite.
+Jun 4 16:19:07.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:19:07.969: INFO: namespace var-expansion-4669 deletion completed in 6.388602552s
+
+• [SLOW TEST:10.631 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+ should allow substituting values in a container's command [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets
+ should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:19:07.969: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-map-7aab6035-86e4-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun 4 16:19:08.046: INFO: Waiting up to 5m0s for pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac" in namespace "secrets-1354" to be "success or failure"
+Jun 4 16:19:08.054: INFO: Pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.958531ms
+Jun 4 16:19:10.060: INFO: Pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014224409s
+Jun 4 16:19:12.066: INFO: Pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac": Phase="Running", Reason="", readiness=true. Elapsed: 4.02029093s
+Jun 4 16:19:14.129: INFO: Pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.083453961s
+STEP: Saw pod success
+Jun 4 16:19:14.129: INFO: Pod "pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:19:14.135: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac container secret-volume-test:
+STEP: delete the pod
+Jun 4 16:19:14.248: INFO: Waiting for pod pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:19:14.252: INFO: Pod pod-secrets-7aace20a-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Secrets
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:19:14.252: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-1354" for this suite.
+Jun 4 16:19:20.274: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:19:20.995: INFO: namespace secrets-1354 deletion completed in 6.737662371s
+
+• [SLOW TEST:13.026 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+ should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes
+ should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:19:20.996: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun 4 16:19:21.091: INFO: Waiting up to 5m0s for pod "pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-6594" to be "success or failure"
+Jun 4 16:19:21.098: INFO: Pod "pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.659053ms
+Jun 4 16:19:23.225: INFO: Pod "pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.133623337s
+Jun 4 16:19:25.233: INFO: Pod "pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.142088796s
+STEP: Saw pod success
+Jun 4 16:19:25.233: INFO: Pod "pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:19:25.238: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac container test-container:
+STEP: delete the pod
+Jun 4 16:19:25.343: INFO: Waiting for pod pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:19:25.429: INFO: Pod pod-8272122e-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:19:25.429: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-6594" for this suite.
+Jun 4 16:19:31.455: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:19:31.792: INFO: namespace emptydir-6594 deletion completed in 6.355508299s
+
+• [SLOW TEST:10.797 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+ should support (root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class
+ should be submitted and removed [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:19:31.793: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods Set QOS Class
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:177
+[It] should be submitted and removed [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying QOS class is set on the pod
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:19:31.876: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-4482" for this suite.
+Jun 4 16:19:53.965: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:19:54.448: INFO: namespace pods-4482 deletion completed in 22.523328016s + +• [SLOW TEST:22.655 seconds] +[k8s.io] [sig-node] Pods Extended +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + [k8s.io] Pods Set QOS Class + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should be submitted and removed [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run pod + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:19:54.448: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[BeforeEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1583 +[It] should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: running the image docker.io/library/nginx:1.14-alpine +Jun 4 16:19:54.531: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=kubectl-2125' +Jun 4 16:19:54.852: INFO: stderr: "" +Jun 4 16:19:54.852: INFO: stdout: "pod/e2e-test-nginx-pod created\n" +STEP: verifying the pod e2e-test-nginx-pod was created +[AfterEach] [k8s.io] Kubectl run pod + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1588 +Jun 4 16:19:55.023: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete pods e2e-test-nginx-pod --namespace=kubectl-2125' +Jun 4 16:20:01.731: INFO: stderr: "" +Jun 4 16:20:01.731: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:20:01.731: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2125" for this suite. 
+Jun 4 16:20:07.767: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:20:08.335: INFO: namespace kubectl-2125 deletion completed in 6.597934581s + +• [SLOW TEST:13.887 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl run pod + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should create a pod from an image when restart is Never [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:20:08.336: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213 +[It] should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: validating cluster-info +Jun 4 16:20:08.438: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 cluster-info' +Jun 4 16:20:08.532: INFO: stderr: "" +Jun 4 16:20:08.532: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://10.10.10.1:443\x1b[0m\n\x1b[0;32mKubeDNS\x1b[0m is running at \x1b[0;33mhttps://10.10.10.1:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:20:08.532: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "kubectl-2843" for this suite. 
+Jun 4 16:20:14.757: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:20:14.986: INFO: namespace kubectl-2843 deletion completed in 6.253971383s + +• [SLOW TEST:6.650 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23 + [k8s.io] Kubectl cluster-info + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should not be blocked by dependency circle [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:20:14.986: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not be blocked by dependency circle [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 4 16:20:15.122: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"a2a3edbd-86e4-11e9-83c6-06284416dbe9", Controller:(*bool)(0xc0031b91fa), BlockOwnerDeletion:(*bool)(0xc0031b91fb)}} +Jun 4 16:20:15.135: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"a2a1da95-86e4-11e9-83c6-06284416dbe9", Controller:(*bool)(0xc0029b78d6), BlockOwnerDeletion:(*bool)(0xc0029b78d7)}} +Jun 4 16:20:15.223: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"a2a2dc48-86e4-11e9-83c6-06284416dbe9", Controller:(*bool)(0xc0031b93da), BlockOwnerDeletion:(*bool)(0xc0031b93db)}} +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:20:20.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "gc-6794" for this suite. 
+Jun 4 16:20:26.331: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:20:27.035: INFO: namespace gc-6794 deletion completed in 6.786432873s + +• [SLOW TEST:12.049 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should not be blocked by dependency circle [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:20:27.036: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name projected-configmap-test-volume-a9cbde59-86e4-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 16:20:27.223: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac" in namespace "projected-3808" to be "success or failure" +Jun 4 16:20:27.235: INFO: Pod "pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 12.002411ms +Jun 4 16:20:29.245: INFO: Pod "pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022648731s +Jun 4 16:20:31.329: INFO: Pod "pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.106168995s +STEP: Saw pod success +Jun 4 16:20:31.329: INFO: Pod "pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:20:31.334: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test: +STEP: delete the pod +Jun 4 16:20:31.444: INFO: Waiting for pod pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:20:31.449: INFO: Pod pod-projected-configmaps-a9cda70d-86e4-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:20:31.449: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "projected-3808" for this suite. 
+Jun 4 16:20:37.476: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:20:37.843: INFO: namespace projected-3808 deletion completed in 6.387006703s
+
+• [SLOW TEST:10.808 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+ should be consumable from pods in volume [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap
+ should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:20:37.844: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-map-b03b4920-86e4-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun 4 16:20:37.903: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac" in namespace "projected-6542" to be "success or failure"
+Jun 4 16:20:37.910: INFO: Pod "pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.035765ms
+Jun 4 16:20:39.917: INFO: Pod "pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013901345s
+Jun 4 16:20:41.932: INFO: Pod "pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029125374s
+STEP: Saw pod success
+Jun 4 16:20:41.933: INFO: Pod "pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:20:41.939: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test:
+STEP: delete the pod
+Jun 4 16:20:42.071: INFO: Waiting for pod pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:20:42.079: INFO: Pod pod-projected-configmaps-b03c8f36-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected configMap
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:20:42.080: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-6542" for this suite.
+Jun 4 16:20:48.115: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:20:48.449: INFO: namespace projected-6542 deletion completed in 6.359325787s
+
+• [SLOW TEST:10.605 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+ should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API
+ should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:20:48.449: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun 4 16:20:48.541: INFO: Waiting up to 5m0s for pod "downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-5846" to be "success or failure"
+Jun 4 16:20:48.549: INFO: Pod "downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.995609ms
+Jun 4 16:20:50.554: INFO: Pod "downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01351932s
+Jun 4 16:20:52.560: INFO: Pod "downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018950218s
+STEP: Saw pod success
+Jun 4 16:20:52.560: INFO: Pod "downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun 4 16:20:52.564: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac container dapi-container:
+STEP: delete the pod
+Jun 4 16:20:52.747: INFO: Waiting for pod downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac to disappear
+Jun 4 16:20:52.824: INFO: Pod downward-api-b692eef9-86e4-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-node] Downward API
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:20:52.824: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-5846" for this suite.
+Jun 4 16:20:58.923: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:20:59.953: INFO: namespace downward-api-5846 deletion completed in 7.12360112s
+
+• [SLOW TEST:11.504 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+ should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook
+ should execute poststart http hook properly [NodeConformance] [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:20:59.953: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +Jun 4 16:21:10.336: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:10.341: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:12.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:12.520: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:14.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:14.423: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:16.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:16.346: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:18.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:18.621: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:20.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:20.346: INFO: Pod pod-with-poststart-http-hook still exists +Jun 4 16:21:22.341: INFO: Waiting for pod pod-with-poststart-http-hook to disappear +Jun 4 16:21:22.347: INFO: Pod pod-with-poststart-http-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:21:22.347: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "container-lifecycle-hook-9993" for this suite. 
+Jun 4 16:21:44.521: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:21:44.929: INFO: namespace container-lifecycle-hook-9993 deletion completed in 22.576863679s + +• [SLOW TEST:44.976 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + when create a pod with lifecycle hook + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSS +------------------------------ +[k8s.io] [sig-node] PreStop + should call prestop when killing a pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:21:44.929: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename prestop +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pre_stop.go:167 +[It] should call prestop when killing a pod [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating server pod server in namespace prestop-3706 +STEP: Waiting for pods to come up. +STEP: Creating tester pod tester in namespace prestop-3706 +STEP: Deleting pre-stop pod +Jun 4 16:22:00.169: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." + ], + "StillContactingPeers": true +} +STEP: Deleting the server pod +[AfterEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:22:00.179: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "prestop-3706" for this suite. 
+Jun 4 16:22:40.205: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun 4 16:22:40.635: INFO: namespace prestop-3706 deletion completed in 40.448530337s
+
+• [SLOW TEST:55.706 seconds]
+[k8s.io] [sig-node] PreStop
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+ should call prestop when killing a pod [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers
+ should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun 4 16:22:40.636: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: closing the watch once it receives two notifications
+Jun 4 16:22:40.770: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9225,SelfLink:/api/v1/namespaces/watch-9225/configmaps/e2e-watch-test-watch-closed,UID:f97bfbfa-86e4-11e9-83c6-06284416dbe9,ResourceVersion:18857,Generation:0,CreationTimestamp:2019-06-04 16:22:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun 4 16:22:40.770: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9225,SelfLink:/api/v1/namespaces/watch-9225/configmaps/e2e-watch-test-watch-closed,UID:f97bfbfa-86e4-11e9-83c6-06284416dbe9,ResourceVersion:18858,Generation:0,CreationTimestamp:2019-06-04 16:22:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time, while the watch is closed
+STEP: creating a new watch on configmaps from the last resource version observed by the first watch
+STEP: deleting the configmap
+STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed
+Jun 4 16:22:40.798: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9225,SelfLink:/api/v1/namespaces/watch-9225/configmaps/e2e-watch-test-watch-closed,UID:f97bfbfa-86e4-11e9-83c6-06284416dbe9,ResourceVersion:18859,Generation:0,CreationTimestamp:2019-06-04 16:22:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun 4 16:22:40.798: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:watch-9225,SelfLink:/api/v1/namespaces/watch-9225/configmaps/e2e-watch-test-watch-closed,UID:f97bfbfa-86e4-11e9-83c6-06284416dbe9,ResourceVersion:18860,Generation:0,CreationTimestamp:2019-06-04 16:22:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+ /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun 4 16:22:40.798: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-9225" for this suite.
+Jun 4 16:22:46.928: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:22:47.417: INFO: namespace watch-9225 deletion completed in 6.600092936s + +• [SLOW TEST:6.781 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to restart watching from the last resource version observed by the previous watch [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:22:47.417: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace statefulset-3322 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a new StatefulSet +Jun 4 16:22:47.829: INFO: Found 1 stateful pods, waiting for 3 +Jun 4 16:22:57.837: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 4 16:22:57.837: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +Jun 4 16:22:57.837: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine +Jun 4 16:22:57.880: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas +STEP: Performing a canary update +Jun 4 16:23:07.941: INFO: Updating stateful set ss2 +Jun 4 16:23:07.956: INFO: Waiting for Pod statefulset-3322/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666 +STEP: Restoring Pods to the correct revision when they are deleted +Jun 4 16:23:18.145: INFO: Found 2 stateful pods, waiting for 3 +Jun 4 16:23:28.152: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +Jun 4 16:23:28.152: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true 
+Jun 4 16:23:28.152: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +Jun 4 16:23:28.227: INFO: Updating stateful set ss2 +Jun 4 16:23:28.237: INFO: Waiting for Pod statefulset-3322/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +Jun 4 16:23:38.272: INFO: Updating stateful set ss2 +Jun 4 16:23:38.282: INFO: Waiting for StatefulSet statefulset-3322/ss2 to complete update +Jun 4 16:23:38.282: INFO: Waiting for Pod statefulset-3322/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666 +Jun 4 16:23:48.415: INFO: Waiting for StatefulSet statefulset-3322/ss2 to complete update +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +Jun 4 16:23:58.294: INFO: Deleting all statefulset in ns statefulset-3322 +Jun 4 16:23:58.301: INFO: Scaling statefulset ss2 to 0 +Jun 4 16:24:18.346: INFO: Waiting for statefulset status.replicas updated to 0 +Jun 4 16:24:18.356: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:24:18.379: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "statefulset-3322" for this suite. +Jun 4 16:24:24.417: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:24:24.831: INFO: namespace statefulset-3322 deletion completed in 6.445019687s + +• [SLOW TEST:97.414 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:24:24.832: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test emptydir volume type on tmpfs +Jun 4 16:24:24.957: INFO: Waiting up to 5m0s 
for pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-1817" to be "success or failure" +Jun 4 16:24:24.962: INFO: Pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.647068ms +Jun 4 16:24:27.018: INFO: Pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.061062349s +Jun 4 16:24:29.030: INFO: Pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.072557811s +Jun 4 16:24:31.036: INFO: Pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.078737577s +STEP: Saw pod success +Jun 4 16:24:31.036: INFO: Pod "pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:24:31.043: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac container test-container: +STEP: delete the pod +Jun 4 16:24:31.117: INFO: Waiting for pod pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:24:31.132: INFO: Pod pod-379280dc-86e5-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:24:31.133: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "emptydir-1817" for this suite. +Jun 4 16:24:37.218: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:24:37.686: INFO: namespace emptydir-1817 deletion completed in 6.547179352s + +• [SLOW TEST:12.854 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41 + volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:24:37.687: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating a pod to test substitution in container's args +Jun 4 16:24:37.737: INFO: Waiting up to 5m0s for pod "var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac" in namespace "var-expansion-7896" to be "success or failure" +Jun 4 16:24:37.742: INFO: Pod "var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac": 
Phase="Pending", Reason="", readiness=false. Elapsed: 4.778891ms +Jun 4 16:24:39.747: INFO: Pod "var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.00994541s +STEP: Saw pod success +Jun 4 16:24:39.748: INFO: Pod "var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:24:39.754: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac container dapi-container: +STEP: delete the pod +Jun 4 16:24:39.834: INFO: Waiting for pod var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:24:39.839: INFO: Pod var-expansion-3f305b7e-86e5-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:24:39.839: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "var-expansion-7896" for this suite. +Jun 4 16:24:45.860: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:24:46.122: INFO: namespace var-expansion-7896 deletion completed in 6.277237593s + +• [SLOW TEST:8.435 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSS +------------------------------ +[k8s.io] Pods + should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:24:46.122: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135 +[It] should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 4 16:24:46.325: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: creating the pod +STEP: submitting the pod to kubernetes +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:24:48.671: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "pods-8764" for this suite. 
+Jun 4 16:25:34.736: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:25:35.246: INFO: namespace pods-8764 deletion completed in 46.528132467s + +• [SLOW TEST:49.124 seconds] +[k8s.io] Pods +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687 + should support remote command execution over websockets [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should support rollover [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:25:35.247: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] deployment should support rollover [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +Jun 4 16:25:35.352: INFO: Pod name rollover-pod: Found 0 pods out of 1 +Jun 4 16:25:40.358: INFO: Pod name rollover-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +Jun 4 16:25:40.358: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready +Jun 4 16:25:42.513: INFO: Creating deployment "test-rollover-deployment" +Jun 4 16:25:42.559: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations +Jun 4 16:25:44.617: INFO: Check revision of new replica set for deployment "test-rollover-deployment" +Jun 4 16:25:44.632: INFO: Ensure that both replica sets have 1 created replica +Jun 4 16:25:44.729: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update +Jun 4 16:25:44.741: INFO: Updating deployment test-rollover-deployment +Jun 4 16:25:44.741: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller +Jun 4 16:25:44.825: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2 +Jun 4 16:25:44.924: INFO: Make sure deployment "test-rollover-deployment" is complete +Jun 4 16:25:44.936: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:44.936: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has 
minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262344, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:46.953: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:46.953: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262344, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:48.955: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:48.955: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262348, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:50.948: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:50.948: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262348, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, 
CollisionCount:(*int32)(nil)} +Jun 4 16:25:53.025: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:53.025: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262348, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:54.952: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:54.952: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262348, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:57.024: INFO: all replica sets need to contain the pod-template-hash label +Jun 4 16:25:57.024: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262348, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695262342, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-766b4d6c9d\" is progressing."}}, CollisionCount:(*int32)(nil)} +Jun 4 16:25:59.003: INFO: +Jun 4 16:25:59.003: INFO: Ensure that both old replica sets have no replicas +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +Jun 4 16:25:59.131: INFO: Deployment "test-rollover-deployment": 
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:deployment-1389,SelfLink:/apis/apps/v1/namespaces/deployment-1389/deployments/test-rollover-deployment,UID:65d362a6-86e5-11e9-83c6-06284416dbe9,ResourceVersion:19773,Generation:2,CreationTimestamp:2019-06-04 16:25:42 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-04 16:25:42 +0000 UTC 2019-06-04 16:25:42 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-04 16:25:58 +0000 UTC 2019-06-04 16:25:42 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-766b4d6c9d" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +Jun 4 16:25:59.136: INFO: New ReplicaSet "test-rollover-deployment-766b4d6c9d" of Deployment "test-rollover-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-766b4d6c9d,GenerateName:,Namespace:deployment-1389,SelfLink:/apis/apps/v1/namespaces/deployment-1389/replicasets/test-rollover-deployment-766b4d6c9d,UID:6726b15c-86e5-11e9-83c6-06284416dbe9,ResourceVersion:19763,Generation:2,CreationTimestamp:2019-06-04 16:25:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 
2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 65d362a6-86e5-11e9-83c6-06284416dbe9 0xc001d52057 0xc001d52058}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +Jun 4 16:25:59.136: INFO: All old ReplicaSets of Deployment "test-rollover-deployment": +Jun 4 16:25:59.136: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:deployment-1389,SelfLink:/apis/apps/v1/namespaces/deployment-1389/replicasets/test-rollover-controller,UID:618c609c-86e5-11e9-83c6-06284416dbe9,ResourceVersion:19772,Generation:2,CreationTimestamp:2019-06-04 16:25:35 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 65d362a6-86e5-11e9-83c6-06284416dbe9 0xc00238dca7 0xc00238dca8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] 
[] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 4 16:25:59.136: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6455657675,GenerateName:,Namespace:deployment-1389,SelfLink:/apis/apps/v1/namespaces/deployment-1389/replicasets/test-rollover-deployment-6455657675,UID:65df5d3e-86e5-11e9-83c6-06284416dbe9,ResourceVersion:19719,Generation:2,CreationTimestamp:2019-06-04 16:25:42 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment 65d362a6-86e5-11e9-83c6-06284416dbe9 0xc00238df17 0xc00238df18}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6455657675,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +Jun 4 16:25:59.141: INFO: Pod 
"test-rollover-deployment-766b4d6c9d-qd7q5" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-766b4d6c9d-qd7q5,GenerateName:test-rollover-deployment-766b4d6c9d-,Namespace:deployment-1389,SelfLink:/api/v1/namespaces/deployment-1389/pods/test-rollover-deployment-766b4d6c9d-qd7q5,UID:672bfe89-86e5-11e9-83c6-06284416dbe9,ResourceVersion:19735,Generation:0,CreationTimestamp:2019-06-04 16:25:44 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 766b4d6c9d,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-766b4d6c9d 6726b15c-86e5-11e9-83c6-06284416dbe9 0xc001d52c37 0xc001d52c38}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-f8gc4 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-f8gc4,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-f8gc4 true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc001d52ca0} {node.kubernetes.io/unreachable Exists NoExecute 0xc001d52cc0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:25:44 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:25:48 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:25:48 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:25:44 +0000 UTC }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.95,StartTime:2019-06-04 16:25:44 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-04 16:25:47 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://085d5a0b53e5b3214919d8dcf1cb42da53364657a28d01628087f1f4ca6dd91b}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:25:59.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "deployment-1389" for this suite. 
+Jun 4 16:26:05.165: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:26:05.548: INFO: namespace deployment-1389 deletion completed in 6.401509019s + +• [SLOW TEST:30.301 seconds] +[sig-apps] Deployment +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should support rollover [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-network] DNS + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:26:05.549: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for the cluster [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-2549.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;podARec=$$(hostname -i| awk -F. 
'{print $$1"-"$$2"-"$$3"-"$$4".dns-2549.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +Jun 4 16:26:10.913: INFO: DNS probes using dns-2549/dns-test-73958d2f-86e5-11e9-a2b6-96b18e3e6fac succeeded + +STEP: deleting the pod +[AfterEach] [sig-network] DNS + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:26:10.935: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "dns-2549" for this suite. +Jun 4 16:26:16.964: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:26:17.523: INFO: namespace dns-2549 deletion completed in 6.58150512s + +• [SLOW TEST:11.975 seconds] +[sig-network] DNS +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for the cluster [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:26:17.523: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Creating configMap with name configmap-test-volume-map-7abe933e-86e5-11e9-a2b6-96b18e3e6fac +STEP: Creating a pod to test consume configMaps +Jun 4 16:26:17.665: INFO: Waiting up to 5m0s for pod "pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac" in namespace "configmap-8681" to be "success or failure" +Jun 4 16:26:17.669: INFO: Pod "pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.443908ms +Jun 4 16:26:19.816: INFO: Pod "pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.15175124s +Jun 4 16:26:21.824: INFO: Pod "pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.159741661s +STEP: Saw pod success +Jun 4 16:26:21.824: INFO: Pod "pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure" +Jun 4 16:26:21.830: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac container configmap-volume-test: +STEP: delete the pod +Jun 4 16:26:21.947: INFO: Waiting for pod pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac to disappear +Jun 4 16:26:21.952: INFO: Pod pod-configmaps-7abfa03a-86e5-11e9-a2b6-96b18e3e6fac no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:26:21.952: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "configmap-8681" for this suite. +Jun 4 16:26:28.127: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:26:28.519: INFO: namespace configmap-8681 deletion completed in 6.504992228s + +• [SLOW TEST:10.996 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32 + should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should release no longer matching pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:26:28.519: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[It] should release no longer matching pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: Given a ReplicationController is created +STEP: When the matched label of one of its pods change +Jun 4 16:26:28.625: INFO: Pod name pod-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:26:28.648: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "replication-controller-4889" for this suite. 
+Jun 4 16:26:34.722: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +Jun 4 16:26:35.241: INFO: namespace replication-controller-4889 deletion completed in 6.582482242s + +• [SLOW TEST:6.722 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should release no longer matching pods [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +------------------------------ +SSSS +------------------------------ +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149 +STEP: Creating a kubernetes client +Jun 4 16:26:35.241: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow opting out of API token automount [Conformance] + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692 +STEP: getting the auto-created API token +Jun 4 16:26:36.055: INFO: created pod pod-service-account-defaultsa +Jun 4 16:26:36.055: INFO: pod pod-service-account-defaultsa service account token volume mount: true +Jun 4 16:26:36.067: INFO: created pod pod-service-account-mountsa +Jun 4 16:26:36.067: INFO: pod pod-service-account-mountsa service account token volume mount: true +Jun 4 16:26:36.078: INFO: created pod pod-service-account-nomountsa +Jun 4 16:26:36.078: INFO: pod pod-service-account-nomountsa service account token volume mount: false +Jun 4 16:26:36.085: INFO: created pod pod-service-account-defaultsa-mountspec +Jun 4 16:26:36.085: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +Jun 4 16:26:36.119: INFO: created pod pod-service-account-mountsa-mountspec +Jun 4 16:26:36.119: INFO: pod pod-service-account-mountsa-mountspec service account token volume mount: true +Jun 4 16:26:36.132: INFO: created pod pod-service-account-nomountsa-mountspec +Jun 4 16:26:36.132: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +Jun 4 16:26:36.143: INFO: created pod pod-service-account-defaultsa-nomountspec +Jun 4 16:26:36.143: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +Jun 4 16:26:36.213: INFO: created pod pod-service-account-mountsa-nomountspec +Jun 4 16:26:36.213: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +Jun 4 16:26:36.223: INFO: created pod pod-service-account-nomountsa-nomountspec +Jun 4 16:26:36.223: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150 +Jun 4 16:26:36.223: INFO: Waiting up to 
3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-882" for this suite.
+Jun  4 16:26:42.332: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:26:42.956: INFO: namespace svcaccounts-882 deletion completed in 6.725855058s
+
+• [SLOW TEST:7.715 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should allow opting out of API token automount [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:26:42.957: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:26:43.015: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:26:47.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-9613" for this suite.
+Jun  4 16:27:35.242: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:27:35.749: INFO: namespace pods-9613 deletion completed in 48.528479248s
+
+• [SLOW TEST:52.792 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl expose
+  should create services for rc [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:27:35.749: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should create services for rc [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating Redis RC
+Jun  4 16:27:35.813: INFO: namespace kubectl-5561
+Jun  4 16:27:35.813: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-5561'
+Jun  4 16:27:36.027: INFO: stderr: ""
+Jun  4 16:27:36.027: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun  4 16:27:37.033: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:27:37.033: INFO: Found 0 / 1
+Jun  4 16:27:38.102: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:27:38.102: INFO: Found 0 / 1
+Jun  4 16:27:39.034: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:27:39.034: INFO: Found 0 / 1
+Jun  4 16:27:40.117: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:27:40.117: INFO: Found 1 / 1
+Jun  4 16:27:40.117: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
+Jun  4 16:27:40.212: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:27:40.212: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Jun  4 16:27:40.212: INFO: wait on redis-master startup in kubectl-5561
+Jun  4 16:27:40.212: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 logs redis-master-ht9l4 redis-master --namespace=kubectl-5561'
+Jun  4 16:27:40.419: INFO: stderr: ""
+Jun  4 16:27:40.419: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 04 Jun 16:27:38.214 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 04 Jun 16:27:38.214 # Server started, Redis version 3.2.12\n1:M 04 Jun 16:27:38.214 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 04 Jun 16:27:38.214 * The server is now ready to accept connections on port 6379\n"
+STEP: exposing RC
+Jun  4 16:27:40.420: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=kubectl-5561'
+Jun  4 16:27:40.525: INFO: stderr: ""
+Jun  4 16:27:40.525: INFO: stdout: "service/rm2 exposed\n"
+Jun  4 16:27:40.616: INFO: Service rm2 in namespace kubectl-5561 found.
+STEP: exposing service
+Jun  4 16:27:42.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=kubectl-5561'
+Jun  4 16:27:42.757: INFO: stderr: ""
+Jun  4 16:27:42.757: INFO: stdout: "service/rm3 exposed\n"
+Jun  4 16:27:42.762: INFO: Service rm3 in namespace kubectl-5561 found.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:27:44.917: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5561" for this suite.
+Jun  4 16:28:06.943: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:28:07.814: INFO: namespace kubectl-5561 deletion completed in 22.889759294s
+
+• [SLOW TEST:32.065 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl expose
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create services for rc [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:28:07.815: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:29:07.955: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-8341" for this suite.
+Jun  4 16:29:30.211: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:29:30.618: INFO: namespace container-probe-8341 deletion completed in 22.596601882s
+
+• [SLOW TEST:82.803 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] Projected configMap
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:29:30.618: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-edcbc94d-86e5-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 16:29:30.694: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac" in namespace "projected-8986" to be "success or failure"
+Jun  4 16:29:30.712: INFO: Pod "pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 18.175614ms
+Jun  4 16:29:32.717: INFO: Pod "pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023392015s
+Jun  4 16:29:34.723: INFO: Pod "pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028523994s
+STEP: Saw pod success
+Jun  4 16:29:34.723: INFO: Pod "pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:29:34.728: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test:
+STEP: delete the pod
+Jun  4 16:29:34.836: INFO: Waiting for pod pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:29:34.840: INFO: Pod pod-projected-configmaps-edcdb15a-86e5-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:29:34.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8986" for this suite.
+Jun  4 16:29:40.864: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:29:41.182: INFO: namespace projected-8986 deletion completed in 6.337006097s
+
+• [SLOW TEST:10.564 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Projected configMap
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:29:41.182: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name cm-test-opt-del-f41c68ca-86e5-11e9-a2b6-96b18e3e6fac
+STEP: Creating configMap with name cm-test-opt-upd-f41c6908-86e5-11e9-a2b6-96b18e3e6fac
+STEP: Creating the pod
+STEP: Deleting configmap cm-test-opt-del-f41c68ca-86e5-11e9-a2b6-96b18e3e6fac
+STEP: Updating configmap cm-test-opt-upd-f41c6908-86e5-11e9-a2b6-96b18e3e6fac
+STEP: Creating configMap with name cm-test-opt-create-f41c692a-86e5-11e9-a2b6-96b18e3e6fac
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:30:58.718: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1142" for this suite.
+Jun  4 16:31:22.745: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:31:23.161: INFO: namespace projected-1142 deletion completed in 24.433747745s
+
+• [SLOW TEST:101.979 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition
+  creating/deleting custom resource definition objects works [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:31:23.162: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename custom-resource-definition
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] creating/deleting custom resource definition objects works [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:31:23.218: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+[AfterEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:31:24.532: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "custom-resource-definition-4856" for this suite.
+Jun  4 16:31:30.561: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:31:30.891: INFO: namespace custom-resource-definition-4856 deletion completed in 6.348113653s
+
+• [SLOW TEST:7.729 seconds]
+[sig-api-machinery] CustomResourceDefinition resources
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  Simple CustomResourceDefinition
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35
+    creating/deleting custom resource definition objects works [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-storage] Downward API volume
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:31:30.891: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:31:30.950: INFO: Waiting up to 5m0s for pod "downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-9923" to be "success or failure"
+Jun  4 16:31:30.956: INFO: Pod "downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.477512ms
+Jun  4 16:31:32.961: INFO: Pod "downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.010871484s
+Jun  4 16:31:35.003: INFO: Pod "downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.052311225s
+STEP: Saw pod success
+Jun  4 16:31:35.003: INFO: Pod "downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:31:35.008: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac container client-container:
+STEP: delete the pod
+Jun  4 16:31:35.214: INFO: Waiting for pod downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:31:35.221: INFO: Pod downwardapi-volume-357b80f9-86e6-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:31:35.221: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-9923" for this suite.
+Jun  4 16:31:41.264: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:31:41.934: INFO: namespace downward-api-9923 deletion completed in 6.704344018s
+
+• [SLOW TEST:11.043 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl describe
+  should check if kubectl describe prints relevant information for rc and pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:31:41.934: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should check if kubectl describe prints relevant information for rc and pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:31:42.115: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 version --client'
+Jun  4 16:31:42.166: INFO: stderr: ""
+Jun  4 16:31:42.166: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"14\", GitVersion:\"v1.14.1\", GitCommit:\"b7394102d6ef778017f2ca4046abbaa23b88c290\", GitTreeState:\"clean\", BuildDate:\"2019-04-08T17:11:31Z\", GoVersion:\"go1.12.1\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+Jun  4 16:31:42.169: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-2345'
+Jun  4 16:31:42.568: INFO: stderr: ""
+Jun  4 16:31:42.569: INFO: stdout: "replicationcontroller/redis-master created\n"
+Jun  4 16:31:42.569: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-2345'
+Jun  4 16:31:42.761: INFO: stderr: ""
+Jun  4 16:31:42.761: INFO: stdout: "service/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun  4 16:31:43.767: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:31:43.767: INFO: Found 0 / 1
+Jun  4 16:31:44.769: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:31:44.769: INFO: Found 0 / 1
+Jun  4 16:31:45.814: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:31:45.815: INFO: Found 1 / 1
+Jun  4 16:31:45.815: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1
+Jun  4 16:31:45.819: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:31:45.819: INFO: ForEach: Found 1 pods from the filter. Now looping through them.
+Jun  4 16:31:45.819: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 describe pod redis-master-mhfbq --namespace=kubectl-2345'
+Jun  4 16:31:46.024: INFO: stderr: ""
+Jun  4 16:31:46.024: INFO: stdout: "Name: redis-master-mhfbq\nNamespace: kubectl-2345\nPriority: 0\nPriorityClassName: \nNode: ip-172-31-9-156.eu-central-1.compute.internal/172.31.9.156\nStart Time: Tue, 04 Jun 2019 16:31:42 +0000\nLabels: app=redis\n role=master\nAnnotations: \nStatus: Running\nIP: 172.25.2.110\nControlled By: ReplicationController/redis-master\nContainers:\n redis-master:\n Container ID: docker://83111cbb14d1ce25f5af72a4e63936113ec67168e7bf67126ec8f3802e2fa12f\n Image: gcr.io/kubernetes-e2e-test-images/redis:1.0\n Image ID: docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Tue, 04 Jun 2019 16:31:44 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from default-token-pnpv5 (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n default-token-pnpv5:\n Type: Secret (a volume populated by a Secret)\n SecretName: default-token-pnpv5\n Optional: false\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: node.kubernetes.io/not-ready:NoExecute for 300s\n node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 4s default-scheduler Successfully assigned kubectl-2345/redis-master-mhfbq to ip-172-31-9-156.eu-central-1.compute.internal\n Normal Pulled 3s kubelet, ip-172-31-9-156.eu-central-1.compute.internal Container image \"gcr.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n Normal Created 3s kubelet, ip-172-31-9-156.eu-central-1.compute.internal Created container redis-master\n Normal Started 2s kubelet, ip-172-31-9-156.eu-central-1.compute.internal Started container redis-master\n"
+Jun  4 16:31:46.024: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 describe rc redis-master --namespace=kubectl-2345'
+Jun  4 16:31:46.140: INFO: stderr: ""
+Jun  4 16:31:46.140: INFO: stdout: "Name: redis-master\nNamespace: kubectl-2345\nSelector: app=redis,role=master\nLabels: app=redis\n role=master\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=redis\n role=master\n Containers:\n redis-master:\n Image: gcr.io/kubernetes-e2e-test-images/redis:1.0\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 4s replication-controller Created pod: redis-master-mhfbq\n"
+Jun  4 16:31:46.140: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 describe service redis-master --namespace=kubectl-2345'
+Jun  4 16:31:46.422: INFO: stderr: ""
+Jun  4 16:31:46.428: INFO: stdout: "Name: redis-master\nNamespace: kubectl-2345\nLabels: app=redis\n role=master\nAnnotations: \nSelector: app=redis,role=master\nType: ClusterIP\nIP: 10.10.10.144\nPort: 6379/TCP\nTargetPort: redis-server/TCP\nEndpoints: 172.25.2.110:6379\nSession Affinity: None\nEvents: \n"
+Jun  4 16:31:46.434: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 describe node ip-172-31-11-48.eu-central-1.compute.internal'
+Jun  4 16:31:46.648: INFO: stderr: ""
+Jun  4 16:31:46.648: INFO: stdout: "Name: ip-172-31-11-48.eu-central-1.compute.internal\nRoles: \nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/instance-type=t3.medium\n beta.kubernetes.io/os=linux\n failure-domain.beta.kubernetes.io/region=eu-central-1\n failure-domain.beta.kubernetes.io/zone=eu-central-1a\n kubernetes.io/arch=amd64\n kubernetes.io/hostname=ip-172-31-11-48\n kubernetes.io/os=linux\n machine-controller/owned-by=044b80fc-86d9-11e9-af36-5a9d580622ae\nAnnotations: flannel.alpha.coreos.com/backend-data: {\"VtepMAC\":\"62:2b:77:cb:49:0f\"}\n flannel.alpha.coreos.com/backend-type: vxlan\n flannel.alpha.coreos.com/kube-subnet-manager: true\n flannel.alpha.coreos.com/public-ip: 172.31.11.48\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Tue, 04 Jun 2019 14:59:18 +0000\nTaints: \nUnschedulable: false\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n MemoryPressure False Tue, 04 Jun 2019 16:31:11 +0000 Tue, 04 Jun 2019 14:59:18 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Tue, 04 Jun 2019 16:31:11 +0000 Tue, 04 Jun 2019 14:59:18 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Tue, 04 Jun 2019 16:31:11 +0000 Tue, 04 Jun 2019 14:59:18 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Tue, 04 Jun 2019 16:31:11 +0000 Tue, 04 Jun 2019 14:59:59 +0000 KubeletReady kubelet is posting ready status. AppArmor enabled\nAddresses:\n InternalIP: 172.31.11.48\n ExternalIP: 18.185.103.163\n InternalDNS: ip-172-31-11-48.eu-central-1.compute.internal\n Hostname: ip-172-31-11-48.eu-central-1.compute.internal\n ExternalDNS: ec2-18-185-103-163.eu-central-1.compute.amazonaws.com\nCapacity:\n attachable-volumes-aws-ebs: 25\n cpu: 2\n ephemeral-storage: 25346000Ki\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3978596Ki\n pods: 110\nAllocatable:\n attachable-volumes-aws-ebs: 25\n cpu: 1800m\n ephemeral-storage: 21211389914\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3671396Ki\n pods: 110\nSystem Info:\n Machine ID: ec29edaedd57c62822d01da74c5f6d8f\n System UUID: EC29EDAE-DD57-C628-22D0-1DA74C5F6D8F\n Boot ID: 38acdd61-0e82-4586-9dac-dba95c89d43f\n Kernel Version: 4.15.0-1039-aws\n OS Image: Ubuntu 18.04.2 LTS\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: docker://18.9.2\n Kubelet Version: v1.14.1\n Kube-Proxy Version: v1.14.1\nPodCIDR: 172.25.0.0/24\nProviderID: aws:///eu-central-1a/i-09ffe7d48a11b563a\nNon-terminated Pods: (9 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE\n --------- ---- ------------ ---------- --------------- ------------- ---\n heptio-sonobuoy sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-tmnxg 0 (0%) 0 (0%) 0 (0%) 0 (0%) 37m\n kube-system canal-dqcxs 350m (19%) 100m (5%) 50Mi (1%) 50Mi (1%) 92m\n kube-system coredns-568fd445fd-l7bhx 100m (5%) 0 (0%) 70Mi (1%) 170Mi (4%) 94m\n kube-system coredns-568fd445fd-q5bsd 100m (5%) 0 (0%) 70Mi (1%) 170Mi (4%) 94m\n kube-system kube-proxy-8f464 75m (4%) 250m (13%) 50Mi (1%) 250Mi (6%) 92m\n kube-system kubernetes-dashboard-57dcd9448b-pcpsp 75m (4%) 75m (4%) 50Mi (1%) 50Mi (1%) 94m\n kube-system node-exporter-fm98z 20m (1%) 45m (2%) 48Mi (1%) 96Mi (2%) 92m\n kube-system node-local-dns-bqd4m 25m (1%) 0 (0%) 5Mi (0%) 30Mi (0%) 91m\n kube-system openvpn-client-5bbcf59684-r2rls 30m (1%) 200m (11%) 30Mi (0%) 82Mi (2%) 94m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 775m (43%) 670m (37%)\n memory 373Mi (10%) 898Mi (25%)\n ephemeral-storage 0 (0%) 0 (0%)\n attachable-volumes-aws-ebs 0 0\nEvents: \n"
+Jun  4 16:31:46.648: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 describe namespace kubectl-2345'
+Jun  4 16:31:46.757: INFO: stderr: ""
+Jun  4 16:31:46.757: INFO: stdout: "Name: kubectl-2345\nLabels: e2e-framework=kubectl\n e2e-run=081e846b-86e1-11e9-a2b6-96b18e3e6fac\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo resource limits.\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:31:46.757: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-2345" for this suite.
+Jun  4 16:32:08.836: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:32:09.302: INFO: namespace kubectl-2345 deletion completed in 22.485882664s
+
+• [SLOW TEST:27.368 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl describe
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should check if kubectl describe prints relevant information for rc and pods [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance]
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:32:09.302: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+Jun  4 16:32:09.353: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:32:15.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-9580" for this suite.
+Jun  4 16:32:37.139: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:32:37.739: INFO: namespace init-container-9580 deletion completed in 22.619340147s
+
+• [SLOW TEST:28.437 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:32:37.739: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-map-5d52a2be-86e6-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 16:32:37.798: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac" in namespace "projected-5233" to be "success or failure"
+Jun  4 16:32:37.803: INFO: Pod "pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.957425ms
+Jun  4 16:32:39.809: INFO: Pod "pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01103738s
+Jun  4 16:32:41.817: INFO: Pod "pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01873074s
+STEP: Saw pod success
+Jun  4 16:32:41.817: INFO: Pod "pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:32:41.824: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test:
+STEP: delete the pod
+Jun  4 16:32:41.913: INFO: Waiting for pod pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:32:41.919: INFO: Pod pod-projected-configmaps-5d53ac29-86e6-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:32:41.919: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5233" for this suite.
+Jun  4 16:32:48.112: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:32:48.655: INFO: namespace projected-5233 deletion completed in 6.720534444s
+
+• [SLOW TEST:10.916 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1
+  should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] version v1
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:32:48.656: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node with explicit kubelet port using proxy subresource [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:32:48.778: INFO: (0) /api/v1/nodes/ip-172-31-11-48.eu-central-1.compute.internal:10250/proxy/logs/:
+alternatives.log
+amazon/
+>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+Jun  4 16:33:04.247: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun  4 16:33:04.253: INFO: Pod pod-with-prestop-http-hook still exists
+Jun  4 16:33:06.253: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun  4 16:33:06.262: INFO: Pod pod-with-prestop-http-hook still exists
+Jun  4 16:33:08.253: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun  4 16:33:08.258: INFO: Pod pod-with-prestop-http-hook still exists
+Jun  4 16:33:10.253: INFO: Waiting for pod pod-with-prestop-http-hook to disappear
+Jun  4 16:33:10.258: INFO: Pod pod-with-prestop-http-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:33:10.371: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-9088" for this suite.
+Jun  4 16:33:32.430: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:33:32.930: INFO: namespace container-lifecycle-hook-9088 deletion completed in 22.553272901s
+
+• [SLOW TEST:36.966 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute prestop http hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:33:32.931: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-9041
+[It] Burst scaling should run to completion even with unhealthy pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating stateful set ss in namespace statefulset-9041
+STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-9041
+Jun  4 16:33:33.040: INFO: Found 0 stateful pods, waiting for 1
+Jun  4 16:33:43.047: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod
+Jun  4 16:33:43.112: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:33:44.419: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:33:44.419: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:33:44.419: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 16:33:44.424: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+Jun  4 16:33:54.431: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 16:33:54.431: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 16:33:54.458: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:33:54.458: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:45 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:45 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:33:54.458: INFO: 
+Jun  4 16:33:54.458: INFO: StatefulSet ss has not reached scale 3, at 1
+Jun  4 16:33:55.518: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.991243639s
+Jun  4 16:33:56.523: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.931224236s
+Jun  4 16:33:57.612: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.926012503s
+Jun  4 16:33:58.621: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.837628026s
+Jun  4 16:33:59.630: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.828101584s
+Jun  4 16:34:00.637: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.819073828s
+Jun  4 16:34:01.717: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.81264247s
+Jun  4 16:34:02.812: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.732005049s
+Jun  4 16:34:03.819: INFO: Verifying statefulset ss doesn't scale past 3 for another 637.18784ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-9041
+Jun  4 16:34:04.825: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:06.095: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 16:34:06.095: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 16:34:06.095: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 16:34:06.095: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:07.219: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n"
+Jun  4 16:34:07.219: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 16:34:07.219: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 16:34:07.219: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:08.203: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\nmv: can't rename '/tmp/index.html': No such file or directory\n+ true\n"
+Jun  4 16:34:08.203: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 16:34:08.203: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 16:34:08.218: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 16:34:08.218: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 16:34:08.218: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Scale down will not halt with unhealthy stateful pod
+Jun  4 16:34:08.312: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:34:09.138: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:34:09.138: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:34:09.138: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 16:34:09.138: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:34:10.056: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:34:10.056: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:34:10.056: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 16:34:10.056: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-2 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:34:11.038: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:34:11.038: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:34:11.038: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 16:34:11.038: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 16:34:11.117: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 1
+Jun  4 16:34:21.131: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 16:34:21.131: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 16:34:21.131: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 16:34:21.151: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:21.151: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:21.151: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:21.151: INFO: ss-2  ip-172-31-11-48.eu-central-1.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:21.151: INFO: 
+Jun  4 16:34:21.151: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun  4 16:34:22.212: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:22.212: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:22.212: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:22.212: INFO: ss-2  ip-172-31-11-48.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:22.212: INFO: 
+Jun  4 16:34:22.212: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun  4 16:34:23.218: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:23.218: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:23.218: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:23.218: INFO: ss-2  ip-172-31-11-48.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:23.218: INFO: 
+Jun  4 16:34:23.218: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun  4 16:34:24.225: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:24.225: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:24.225: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:24.225: INFO: ss-2  ip-172-31-11-48.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:24.225: INFO: 
+Jun  4 16:34:24.225: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun  4 16:34:25.231: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:25.231: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:25.231: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:25.231: INFO: ss-2  ip-172-31-11-48.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:11 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:25.231: INFO: 
+Jun  4 16:34:25.231: INFO: StatefulSet ss has not reached scale 0, at 3
+Jun  4 16:34:26.239: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:26.239: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:26.239: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:26.239: INFO: 
+Jun  4 16:34:26.239: INFO: StatefulSet ss has not reached scale 0, at 2
+Jun  4 16:34:27.247: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:27.247: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:27.247: INFO: ss-1  ip-172-31-9-162.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:54 +0000 UTC  }]
+Jun  4 16:34:27.247: INFO: 
+Jun  4 16:34:27.247: INFO: StatefulSet ss has not reached scale 0, at 2
+Jun  4 16:34:28.294: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:28.294: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:28.295: INFO: 
+Jun  4 16:34:28.295: INFO: StatefulSet ss has not reached scale 0, at 1
+Jun  4 16:34:29.301: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:29.302: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:29.302: INFO: 
+Jun  4 16:34:29.302: INFO: StatefulSet ss has not reached scale 0, at 1
+Jun  4 16:34:30.317: INFO: POD   NODE                                           PHASE    GRACE  CONDITIONS
+Jun  4 16:34:30.317: INFO: ss-0  ip-172-31-9-156.eu-central-1.compute.internal  Running  30s    [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:34:10 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:33:33 +0000 UTC  }]
+Jun  4 16:34:30.317: INFO: 
+Jun  4 16:34:30.317: INFO: StatefulSet ss has not reached scale 0, at 1
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of the pods will run in namespace statefulset-9041
+Jun  4 16:34:31.324: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:31.617: INFO: rc: 1
+Jun  4 16:34:31.617: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    error: unable to upgrade connection: container not found ("nginx")
+ []  0xc002ec61b0 exit status 1   true [0xc000011910 0xc0000119a0 0xc000011a30] [0xc000011910 0xc0000119a0 0xc000011a30] [0xc000011998 0xc000011a00] [0x9bf9f0 0x9bf9f0] 0xc0029d9800 }:
+Command stdout:
+
+stderr:
+error: unable to upgrade connection: container not found ("nginx")
+
+error:
+exit status 1
+
+Jun  4 16:34:41.617: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:41.718: INFO: rc: 1
+Jun  4 16:34:41.718: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc0031a0f90 exit status 1   true [0xc000e4a5c8 0xc000e4a5e0 0xc000e4a5f8] [0xc000e4a5c8 0xc000e4a5e0 0xc000e4a5f8] [0xc000e4a5d8 0xc000e4a5f0] [0x9bf9f0 0x9bf9f0] 0xc001dce2a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:34:51.719: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:34:51.916: INFO: rc: 1
+Jun  4 16:34:51.916: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52360 exit status 1   true [0xc002cec008 0xc002cec020 0xc002cec038] [0xc002cec008 0xc002cec020 0xc002cec038] [0xc002cec018 0xc002cec030] [0x9bf9f0 0x9bf9f0] 0xc0026e25a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:01.916: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:02.197: INFO: rc: 1
+Jun  4 16:35:02.197: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c526f0 exit status 1   true [0xc002cec040 0xc002cec058 0xc002cec070] [0xc002cec040 0xc002cec058 0xc002cec070] [0xc002cec050 0xc002cec068] [0x9bf9f0 0x9bf9f0] 0xc0026e2ae0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:12.197: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:12.423: INFO: rc: 1
+Jun  4 16:35:12.423: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52a80 exit status 1   true [0xc002cec078 0xc002cec090 0xc002cec0a8] [0xc002cec078 0xc002cec090 0xc002cec0a8] [0xc002cec088 0xc002cec0a0] [0x9bf9f0 0x9bf9f0] 0xc0026e2f60 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:22.424: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:22.626: INFO: rc: 1
+Jun  4 16:35:22.627: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142390 exit status 1   true [0xc0024ca000 0xc0024ca030 0xc0024ca048] [0xc0024ca000 0xc0024ca030 0xc0024ca048] [0xc0024ca028 0xc0024ca040] [0x9bf9f0 0x9bf9f0] 0xc001354540 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:32.627: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:32.825: INFO: rc: 1
+Jun  4 16:35:32.825: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142750 exit status 1   true [0xc0024ca050 0xc0024ca090 0xc0024ca0b8] [0xc0024ca050 0xc0024ca090 0xc0024ca0b8] [0xc0024ca088 0xc0024ca0b0] [0x9bf9f0 0x9bf9f0] 0xc001354ae0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:42.825: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:42.899: INFO: rc: 1
+Jun  4 16:35:42.899: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142c60 exit status 1   true [0xc0024ca0c0 0xc0024ca0d8 0xc0024ca0f0] [0xc0024ca0c0 0xc0024ca0d8 0xc0024ca0f0] [0xc0024ca0d0 0xc0024ca0e8] [0x9bf9f0 0x9bf9f0] 0xc001354e40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:35:52.900: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:35:52.968: INFO: rc: 1
+Jun  4 16:35:52.968: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142ff0 exit status 1   true [0xc0024ca0f8 0xc0024ca110 0xc0024ca128] [0xc0024ca0f8 0xc0024ca110 0xc0024ca128] [0xc0024ca108 0xc0024ca120] [0x9bf9f0 0x9bf9f0] 0xc0013552c0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:02.968: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:03.221: INFO: rc: 1
+Jun  4 16:36:03.221: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143410 exit status 1   true [0xc0024ca130 0xc0024ca148 0xc0024ca160] [0xc0024ca130 0xc0024ca148 0xc0024ca160] [0xc0024ca140 0xc0024ca158] [0x9bf9f0 0x9bf9f0] 0xc001355a40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:13.221: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:13.435: INFO: rc: 1
+Jun  4 16:36:13.436: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143830 exit status 1   true [0xc0024ca168 0xc0024ca188 0xc0024ca1a0] [0xc0024ca168 0xc0024ca188 0xc0024ca1a0] [0xc0024ca180 0xc0024ca198] [0x9bf9f0 0x9bf9f0] 0xc0027941e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:23.436: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:23.504: INFO: rc: 1
+Jun  4 16:36:23.505: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143bf0 exit status 1   true [0xc0024ca1a8 0xc0024ca1d8 0xc0024ca1f0] [0xc0024ca1a8 0xc0024ca1d8 0xc0024ca1f0] [0xc0024ca1d0 0xc0024ca1e8] [0x9bf9f0 0x9bf9f0] 0xc0027947e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:33.505: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:33.603: INFO: rc: 1
+Jun  4 16:36:33.603: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52ea0 exit status 1   true [0xc002cec0b0 0xc002cec0c8 0xc002cec0e0] [0xc002cec0b0 0xc002cec0c8 0xc002cec0e0] [0xc002cec0c0 0xc002cec0d8] [0x9bf9f0 0x9bf9f0] 0xc0026e33e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:43.603: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:43.838: INFO: rc: 1
+Jun  4 16:36:43.838: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c53230 exit status 1   true [0xc002cec0e8 0xc002cec100 0xc002cec118] [0xc002cec0e8 0xc002cec100 0xc002cec118] [0xc002cec0f8 0xc002cec110] [0x9bf9f0 0x9bf9f0] 0xc0026e39e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:36:53.838: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:36:53.908: INFO: rc: 1
+Jun  4 16:36:53.908: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142360 exit status 1   true [0xc0024ca020 0xc0024ca038 0xc0024ca050] [0xc0024ca020 0xc0024ca038 0xc0024ca050] [0xc0024ca030 0xc0024ca048] [0x9bf9f0 0x9bf9f0] 0xc001354540 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:03.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:04.045: INFO: rc: 1
+Jun  4 16:37:04.045: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142780 exit status 1   true [0xc0024ca068 0xc0024ca0a8 0xc0024ca0c0] [0xc0024ca068 0xc0024ca0a8 0xc0024ca0c0] [0xc0024ca090 0xc0024ca0b8] [0x9bf9f0 0x9bf9f0] 0xc001354ae0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:14.046: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:14.121: INFO: rc: 1
+Jun  4 16:37:14.121: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002142cc0 exit status 1   true [0xc0024ca0c8 0xc0024ca0e0 0xc0024ca0f8] [0xc0024ca0c8 0xc0024ca0e0 0xc0024ca0f8] [0xc0024ca0d8 0xc0024ca0f0] [0x9bf9f0 0x9bf9f0] 0xc001354e40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:24.122: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:24.200: INFO: rc: 1
+Jun  4 16:37:24.200: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c523f0 exit status 1   true [0xc002cec000 0xc002cec018 0xc002cec030] [0xc002cec000 0xc002cec018 0xc002cec030] [0xc002cec010 0xc002cec028] [0x9bf9f0 0x9bf9f0] 0xc0027945a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:34.200: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:34.397: INFO: rc: 1
+Jun  4 16:37:34.397: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52780 exit status 1   true [0xc002cec038 0xc002cec050 0xc002cec068] [0xc002cec038 0xc002cec050 0xc002cec068] [0xc002cec048 0xc002cec060] [0x9bf9f0 0x9bf9f0] 0xc002794cc0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:44.397: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:44.471: INFO: rc: 1
+Jun  4 16:37:44.471: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52b40 exit status 1   true [0xc002cec070 0xc002cec088 0xc002cec0a0] [0xc002cec070 0xc002cec088 0xc002cec0a0] [0xc002cec080 0xc002cec098] [0x9bf9f0 0x9bf9f0] 0xc002795380 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:37:54.476: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:37:54.642: INFO: rc: 1
+Jun  4 16:37:54.642: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143080 exit status 1   true [0xc0024ca100 0xc0024ca118 0xc0024ca130] [0xc0024ca100 0xc0024ca118 0xc0024ca130] [0xc0024ca110 0xc0024ca128] [0x9bf9f0 0x9bf9f0] 0xc0013552c0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:04.642: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:04.733: INFO: rc: 1
+Jun  4 16:38:04.733: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143530 exit status 1   true [0xc0024ca138 0xc0024ca150 0xc0024ca168] [0xc0024ca138 0xc0024ca150 0xc0024ca168] [0xc0024ca148 0xc0024ca160] [0x9bf9f0 0x9bf9f0] 0xc001355a40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:14.733: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:14.842: INFO: rc: 1
+Jun  4 16:38:14.842: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143950 exit status 1   true [0xc0024ca178 0xc0024ca190 0xc0024ca1a8] [0xc0024ca178 0xc0024ca190 0xc0024ca1a8] [0xc0024ca188 0xc0024ca1a0] [0x9bf9f0 0x9bf9f0] 0xc0026e21e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:24.843: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:25.040: INFO: rc: 1
+Jun  4 16:38:25.040: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52f00 exit status 1   true [0xc002cec0a8 0xc002cec0c0 0xc002cec0d8] [0xc002cec0a8 0xc002cec0c0 0xc002cec0d8] [0xc002cec0b8 0xc002cec0d0] [0x9bf9f0 0x9bf9f0] 0xc0027958c0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:35.040: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:35.245: INFO: rc: 1
+Jun  4 16:38:35.245: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c532c0 exit status 1   true [0xc002cec0e0 0xc002cec0f8 0xc002cec110] [0xc002cec0e0 0xc002cec0f8 0xc002cec110] [0xc002cec0f0 0xc002cec108] [0x9bf9f0 0x9bf9f0] 0xc002795e00 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:45.245: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:45.369: INFO: rc: 1
+Jun  4 16:38:45.369: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc002143da0 exit status 1   true [0xc0024ca1b8 0xc0024ca1e0 0xc0024ca1f8] [0xc0024ca1b8 0xc0024ca1e0 0xc0024ca1f8] [0xc0024ca1d8 0xc0024ca1f0] [0x9bf9f0 0x9bf9f0] 0xc0026e28a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:38:55.369: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:38:55.449: INFO: rc: 1
+Jun  4 16:38:55.449: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52360 exit status 1   true [0xc002cec008 0xc002cec020 0xc002cec038] [0xc002cec008 0xc002cec020 0xc002cec038] [0xc002cec018 0xc002cec030] [0x9bf9f0 0x9bf9f0] 0xc001354540 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:39:05.449: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:39:05.629: INFO: rc: 1
+Jun  4 16:39:05.629: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52720 exit status 1   true [0xc002cec040 0xc002cec058 0xc002cec070] [0xc002cec040 0xc002cec058 0xc002cec070] [0xc002cec050 0xc002cec068] [0x9bf9f0 0x9bf9f0] 0xc001354ae0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:39:15.629: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:39:15.726: INFO: rc: 1
+Jun  4 16:39:15.726: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc0021423c0 exit status 1   true [0xc0024ca000 0xc0024ca030 0xc0024ca048] [0xc0024ca000 0xc0024ca030 0xc0024ca048] [0xc0024ca028 0xc0024ca040] [0x9bf9f0 0x9bf9f0] 0xc0027945a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:39:25.726: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:39:25.857: INFO: rc: 1
+Jun  4 16:39:25.857: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-0" not found
+ []  0xc001c52ab0 exit status 1   true [0xc002cec078 0xc002cec090 0xc002cec0a8] [0xc002cec078 0xc002cec090 0xc002cec0a8] [0xc002cec088 0xc002cec0a0] [0x9bf9f0 0x9bf9f0] 0xc001354e40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-0" not found
+
+error:
+exit status 1
+
+Jun  4 16:39:35.857: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-9041 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:39:35.942: INFO: rc: 1
+Jun  4 16:39:35.942: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: 
+Jun  4 16:39:35.942: INFO: Scaling statefulset ss to 0
+Jun  4 16:39:35.958: INFO: Waiting for statefulset status.replicas updated to 0
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  4 16:39:36.029: INFO: Deleting all statefulset in ns statefulset-9041
+Jun  4 16:39:36.035: INFO: Scaling statefulset ss to 0
+Jun  4 16:39:36.053: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 16:39:36.058: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:39:36.081: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-9041" for this suite.
+Jun  4 16:39:42.227: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:39:42.728: INFO: namespace statefulset-9041 deletion completed in 6.641861495s
+
+• [SLOW TEST:369.797 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    Burst scaling should run to completion even with unhealthy pods [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:39:42.728: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-6040
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a new StatefulSet
+Jun  4 16:39:42.800: INFO: Found 0 stateful pods, waiting for 3
+Jun  4 16:39:52.807: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 16:39:52.807: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 16:39:52.807: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 16:39:52.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6040 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:39:54.018: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:39:54.018: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:39:54.018: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+Jun  4 16:40:04.122: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+Jun  4 16:40:04.140: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6040 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:40:04.878: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 16:40:04.878: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 16:40:04.878: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 16:40:14.917: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:40:14.917: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+Jun  4 16:40:14.917: INFO: Waiting for Pod statefulset-6040/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666
+Jun  4 16:40:14.917: INFO: Waiting for Pod statefulset-6040/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666
+Jun  4 16:40:25.023: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:40:25.023: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666
+STEP: Rolling back to a previous revision
+Jun  4 16:40:34.931: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6040 ss2-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 16:40:36.002: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 16:40:36.002: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 16:40:36.002: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 16:40:46.124: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+Jun  4 16:40:46.224: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6040 ss2-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 16:40:47.026: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 16:40:47.026: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 16:40:47.026: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 16:40:47.054: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:40:47.054: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:40:47.054: INFO: Waiting for Pod statefulset-6040/ss2-1 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:40:47.054: INFO: Waiting for Pod statefulset-6040/ss2-2 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:40:57.128: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:40:57.128: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:40:57.128: INFO: Waiting for Pod statefulset-6040/ss2-1 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:41:07.070: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:41:07.071: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:41:07.071: INFO: Waiting for Pod statefulset-6040/ss2-1 to have revision ss2-787997d666 update revision ss2-c79899b9
+Jun  4 16:41:17.069: INFO: Waiting for StatefulSet statefulset-6040/ss2 to complete update
+Jun  4 16:41:17.069: INFO: Waiting for Pod statefulset-6040/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  4 16:41:27.144: INFO: Deleting all statefulset in ns statefulset-6040
+Jun  4 16:41:27.149: INFO: Scaling statefulset ss2 to 0
+Jun  4 16:41:47.237: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 16:41:47.246: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:41:47.337: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-6040" for this suite.
+Jun  4 16:41:53.540: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:41:54.010: INFO: namespace statefulset-6040 deletion completed in 6.665852171s
+
+• [SLOW TEST:131.282 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:41:54.010: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating projection with secret that has name projected-secret-test-map-a8e48b4b-86e7-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 16:41:54.147: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac" in namespace "projected-838" to be "success or failure"
+Jun  4 16:41:54.152: INFO: Pod "pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.082278ms
+Jun  4 16:41:56.162: INFO: Pod "pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014549393s
+Jun  4 16:41:58.232: INFO: Pod "pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.084426566s
+STEP: Saw pod success
+Jun  4 16:41:58.232: INFO: Pod "pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:41:58.239: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac container projected-secret-volume-test: 
+STEP: delete the pod
+Jun  4 16:41:58.428: INFO: Waiting for pod pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:41:58.435: INFO: Pod pod-projected-secrets-a8ef8f45-86e7-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:41:58.435: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-838" for this suite.
+Jun  4 16:42:04.549: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:05.043: INFO: namespace projected-838 deletion completed in 6.515248606s
+
+• [SLOW TEST:11.033 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:42:05.043: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun  4 16:42:05.156: INFO: Waiting up to 5m0s for pod "pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-7831" to be "success or failure"
+Jun  4 16:42:05.165: INFO: Pod "pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.839816ms
+Jun  4 16:42:07.171: INFO: Pod "pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014734296s
+STEP: Saw pod success
+Jun  4 16:42:07.171: INFO: Pod "pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:42:07.176: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 16:42:07.218: INFO: Waiting for pod pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:42:07.224: INFO: Pod pod-af7f8c2f-86e7-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:42:07.224: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-7831" for this suite.
+Jun  4 16:42:13.249: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:14.051: INFO: namespace emptydir-7831 deletion completed in 6.821179334s
+
+• [SLOW TEST:9.008 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:42:14.051: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-b4d4b4a3-86e7-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 16:42:14.153: INFO: Waiting up to 5m0s for pod "pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac" in namespace "secrets-6849" to be "success or failure"
+Jun  4 16:42:14.231: INFO: Pod "pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 77.964398ms
+Jun  4 16:42:16.335: INFO: Pod "pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.181079162s
+Jun  4 16:42:18.341: INFO: Pod "pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.187776258s
+STEP: Saw pod success
+Jun  4 16:42:18.341: INFO: Pod "pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:42:18.347: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac container secret-volume-test: 
+STEP: delete the pod
+Jun  4 16:42:18.418: INFO: Waiting for pod pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:42:18.424: INFO: Pod pod-secrets-b4dc7aab-86e7-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:42:18.424: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6849" for this suite.
+Jun  4 16:42:24.456: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:25.656: INFO: namespace secrets-6849 deletion completed in 7.226285765s
+STEP: Destroying namespace "secret-namespace-2816" for this suite.
+Jun  4 16:42:31.677: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:32.058: INFO: namespace secret-namespace-2816 deletion completed in 6.401510534s
+
+• [SLOW TEST:18.006 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:42:32.058: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap that has name configmap-test-emptyKey-bfb50037-86e7-11e9-a2b6-96b18e3e6fac
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:42:32.343: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5034" for this suite.
+Jun  4 16:42:38.377: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:38.758: INFO: namespace configmap-5034 deletion completed in 6.404870038s
+
+• [SLOW TEST:6.700 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32
+  should fail to create ConfigMap with empty key [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:42:38.758: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  4 16:42:38.944: INFO: Waiting up to 5m0s for pod "downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-1560" to be "success or failure"
+Jun  4 16:42:38.951: INFO: Pod "downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.294659ms
+Jun  4 16:42:40.957: INFO: Pod "downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012136918s
+Jun  4 16:42:42.963: INFO: Pod "downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018176291s
+STEP: Saw pod success
+Jun  4 16:42:42.963: INFO: Pod "downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:42:43.036: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac container dapi-container: 
+STEP: delete the pod
+Jun  4 16:42:43.122: INFO: Waiting for pod downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:42:43.132: INFO: Pod downward-api-c3a31824-86e7-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:42:43.132: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-1560" for this suite.
+Jun  4 16:42:49.165: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:42:49.581: INFO: namespace downward-api-1560 deletion completed in 6.437904972s
+
+• [SLOW TEST:10.823 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
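+
+For reference, the host IP checked above reaches the container through a downward API fieldRef; a minimal pod sketch (names are hypothetical):
+
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: host-ip-demo
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: main
+        image: busybox
+        command: ["sh", "-c", "echo $HOST_IP"]
+        env:
+        - name: HOST_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP   # the field this test asserts on
+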
+SSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Aggregator 
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:42:49.581: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename aggregator
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:69
+[It] Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Registering the sample API server.
+Jun  4 16:42:50.279: INFO: deployment "sample-apiserver-deployment" doesn't have the required revision set
+Jun  4 16:42:52.377: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 16:42:54.382: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 16:42:56.434: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 16:42:58.383: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263370, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-apiserver-deployment-65db6755fc\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 16:43:02.032: INFO: Waited 1.640246922s for the sample-apiserver to be ready to handle requests.
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go:60
+[AfterEach] [sig-api-machinery] Aggregator
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:43:03.519: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "aggregator-8946" for this suite.
+Jun  4 16:43:09.568: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:43:09.955: INFO: namespace aggregator-8946 deletion completed in 6.422896671s
+
+• [SLOW TEST:20.374 seconds]
+[sig-api-machinery] Aggregator
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
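+
+The "Registering the sample API server" step above boils down to creating an APIService object that tells the aggregator which Service backs a given API group; a rough sketch (group and service names are hypothetical):
+
+    apiVersion: apiregistration.k8s.io/v1
+    kind: APIService
+    metadata:
+      name: v1alpha1.wardle.example.com
+    spec:
+      group: wardle.example.com
+      version: v1alpha1
+      service:
+        name: sample-api
+        namespace: default
+      insecureSkipTLSVerify: true
+      groupPriorityMinimum: 100
+      versionPriority: 100
+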
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:43:09.955: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-8495
+[It] Should recreate evicted statefulset [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Looking for a node to schedule stateful set and pod
+STEP: Creating pod with conflicting port in namespace statefulset-8495
+STEP: Creating statefulset with conflicting port in namespace statefulset-8495
+STEP: Waiting until pod test-pod starts running in namespace statefulset-8495
+STEP: Waiting until stateful pod ss-0 has been recreated and deleted at least once in namespace statefulset-8495

+Jun  4 16:43:14.256: INFO: Observed stateful pod in namespace: statefulset-8495, name: ss-0, uid: d87fd736-86e7-11e9-83c6-06284416dbe9, status phase: Pending. Waiting for statefulset controller to delete.
+Jun  4 16:43:14.532: INFO: Observed stateful pod in namespace: statefulset-8495, name: ss-0, uid: d87fd736-86e7-11e9-83c6-06284416dbe9, status phase: Failed. Waiting for statefulset controller to delete.
+Jun  4 16:43:14.541: INFO: Observed stateful pod in namespace: statefulset-8495, name: ss-0, uid: d87fd736-86e7-11e9-83c6-06284416dbe9, status phase: Failed. Waiting for statefulset controller to delete.
+Jun  4 16:43:14.547: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-8495
+STEP: Removing pod with conflicting port in namespace statefulset-8495
+STEP: Waiting until stateful pod ss-0 is recreated in namespace statefulset-8495 and reaches the running state
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  4 16:43:18.655: INFO: Deleting all statefulset in ns statefulset-8495
+Jun  4 16:43:18.661: INFO: Scaling statefulset ss to 0
+Jun  4 16:43:28.735: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 16:43:28.743: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:43:28.837: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-8495" for this suite.
+Jun  4 16:43:34.949: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:43:35.578: INFO: namespace statefulset-8495 deletion completed in 6.733960706s
+
+• [SLOW TEST:25.623 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    Should recreate evicted statefulset [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
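+
+The property verified above (the StatefulSet controller replaces a stateful pod that fails or disappears) can be observed directly; a rough sketch, assuming a StatefulSet named ss with a matching label already exists:
+
+    kubectl delete pod ss-0
+    kubectl get pods -w -l app=ss   # hypothetical label; ss-0 should reappear
+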
+SSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:43:35.579: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  4 16:43:40.488: INFO: Successfully updated pod "labelsupdatee58da77b-86e7-11e9-a2b6-96b18e3e6fac"
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:43:42.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6732" for this suite.
+Jun  4 16:44:12.730: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:44:13.285: INFO: namespace downward-api-6732 deletion completed in 30.651795039s
+
+• [SLOW TEST:37.706 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
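+
+Label updates reach the running pod through a downwardAPI volume, which the kubelet refreshes in place; the volume stanza looks roughly like:
+
+    volumes:
+    - name: podinfo
+      downwardAPI:
+        items:
+        - path: "labels"
+          fieldRef:
+            fieldPath: metadata.labels
+
+After `kubectl label pod <name> key=value`, the mounted labels file is rewritten without restarting the container.
+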
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:44:13.285: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:44:13.432: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-7314" to be "success or failure"
+Jun  4 16:44:13.443: INFO: Pod "downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 10.460855ms
+Jun  4 16:44:15.465: INFO: Pod "downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.032390365s
+STEP: Saw pod success
+Jun  4 16:44:15.465: INFO: Pod "downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:44:15.483: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:44:15.655: INFO: Waiting for pod downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:44:15.660: INFO: Pod downwardapi-volume-fbebaef1-86e7-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:44:15.660: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-7314" for this suite.
+Jun  4 16:44:21.747: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:44:22.264: INFO: namespace downward-api-7314 deletion completed in 6.594629707s
+
+• [SLOW TEST:8.979 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
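+
+The CPU request is surfaced through a resourceFieldRef rather than a fieldRef; a minimal downwardAPI item sketch (container name is hypothetical):
+
+    - path: "cpu_request"
+      resourceFieldRef:
+        containerName: client-container
+        resource: requests.cpu
+        divisor: 1m              # report the request in millicores
+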
+S
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:44:22.264: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename namespaces
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:44:48.827: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "namespaces-3500" for this suite.
+Jun  4 16:44:55.227: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:44:58.052: INFO: namespace namespaces-3500 deletion completed in 9.219029942s
+STEP: Destroying namespace "nsdeletetest-7076" for this suite.
+Jun  4 16:44:58.057: INFO: Namespace nsdeletetest-7076 was already deleted
+STEP: Destroying namespace "nsdeletetest-6308" for this suite.
+Jun  4 16:45:04.133: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:45:06.726: INFO: namespace nsdeletetest-6308 deletion completed in 8.669590035s
+
+• [SLOW TEST:44.463 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
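+
+The guarantee checked above is simple to confirm by hand: deleting a namespace garbage-collects everything in it, pods included. A sketch with hypothetical names:
+
+    kubectl create namespace demo-ns
+    kubectl run pause --image=k8s.gcr.io/pause:3.1 --restart=Never -n demo-ns
+    kubectl delete namespace demo-ns
+    kubectl get pods -n demo-ns   # no pods (and no namespace) remain
+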
+SSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:45:06.727: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: modifying the configmap a second time
+STEP: deleting the configmap
+STEP: creating a watch on configmaps from the resource version returned by the first update
+STEP: Expecting to observe notifications for all changes to the configmap after the first update
+Jun  4 16:45:07.826: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-5838,SelfLink:/api/v1/namespaces/watch-5838/configmaps/e2e-watch-test-resource-version,UID:1c1bc3b8-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24135,Generation:0,CreationTimestamp:2019-06-04 16:45:07 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  4 16:45:07.826: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:watch-5838,SelfLink:/api/v1/namespaces/watch-5838/configmaps/e2e-watch-test-resource-version,UID:1c1bc3b8-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24136,Generation:0,CreationTimestamp:2019-06-04 16:45:07 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:45:07.826: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-5838" for this suite.
+Jun  4 16:45:13.852: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:45:16.026: INFO: namespace watch-5838 deletion completed in 8.191667224s
+
+• [SLOW TEST:9.299 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should be able to start watching from a specific resource version [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
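+
+Starting a watch from a specific point uses the watch and resourceVersion query parameters on a list endpoint, which is what produced the two events above; a sketch via kubectl proxy (the version number is hypothetical):
+
+    kubectl proxy &
+    curl '127.0.0.1:8001/api/v1/namespaces/default/configmaps?watch=1&resourceVersion=24134'
+    # streams the MODIFIED and DELETED events recorded after that version
+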
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:45:16.026: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Starting the proxy
+Jun  4 16:45:16.230: INFO: Asynchronously running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 proxy --unix-socket=/tmp/kubectl-proxy-unix264591600/test'
+STEP: retrieving proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:45:16.333: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-2966" for this suite.
+Jun  4 16:45:22.643: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:45:24.925: INFO: namespace kubectl-2966 deletion completed in 8.299921565s
+
+• [SLOW TEST:8.899 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Proxy server
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should support --unix-socket=/path  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
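+
+The --unix-socket flag makes the proxy listen on a local socket instead of a TCP port, which curl can query directly:
+
+    kubectl proxy --unix-socket=/tmp/kubectl.sock &
+    curl --unix-socket /tmp/kubectl.sock http://localhost/api/
+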
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:45:24.926: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:45:25.034: INFO: Creating deployment "test-recreate-deployment"
+Jun  4 16:45:25.044: INFO: Waiting for deployment "test-recreate-deployment" to be updated to revision 1
+Jun  4 16:45:25.100: INFO: deployment "test-recreate-deployment" doesn't have the required revision set
+Jun  4 16:45:27.228: INFO: Waiting for deployment "test-recreate-deployment" to complete
+Jun  4 16:45:27.234: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263525, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263525, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263525, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695263525, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-recreate-deployment-7d57d5ff7c\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 16:45:37.406: INFO: Triggering a new rollout for deployment "test-recreate-deployment"
+Jun  4 16:45:37.423: INFO: Updating deployment test-recreate-deployment
+Jun  4 16:45:37.423: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with old pods
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  4 16:45:37.664: INFO: Deployment "test-recreate-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:deployment-5839,SelfLink:/apis/apps/v1/namespaces/deployment-5839/deployments/test-recreate-deployment,UID:26a9e720-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24271,Generation:2,CreationTimestamp:2019-06-04 16:45:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-06-04 16:45:37 +0000 UTC 2019-06-04 16:45:37 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-06-04 16:45:37 +0000 UTC 2019-06-04 16:45:25 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-c9cbd8684" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},}
+
+Jun  4 16:45:37.675: INFO: New ReplicaSet "test-recreate-deployment-c9cbd8684" of Deployment "test-recreate-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-c9cbd8684,GenerateName:,Namespace:deployment-5839,SelfLink:/apis/apps/v1/namespaces/deployment-5839/replicasets/test-recreate-deployment-c9cbd8684,UID:2e187b74-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24270,Generation:1,CreationTimestamp:2019-06-04 16:45:37 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 26a9e720-86e8-11e9-83c6-06284416dbe9 0xc00164a690 0xc00164a691}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  4 16:45:37.676: INFO: All old ReplicaSets of Deployment "test-recreate-deployment":
+Jun  4 16:45:37.676: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-7d57d5ff7c,GenerateName:,Namespace:deployment-5839,SelfLink:/apis/apps/v1/namespaces/deployment-5839/replicasets/test-recreate-deployment-7d57d5ff7c,UID:26ab8876-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24260,Generation:2,CreationTimestamp:2019-06-04 16:45:25 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 26a9e720-86e8-11e9-83c6-06284416dbe9 0xc00164a547 0xc00164a548}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 7d57d5ff7c,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  4 16:45:37.685: INFO: Pod "test-recreate-deployment-c9cbd8684-drqgf" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-c9cbd8684-drqgf,GenerateName:test-recreate-deployment-c9cbd8684-,Namespace:deployment-5839,SelfLink:/api/v1/namespaces/deployment-5839/pods/test-recreate-deployment-c9cbd8684-drqgf,UID:2e1a415c-86e8-11e9-83c6-06284416dbe9,ResourceVersion:24272,Generation:0,CreationTimestamp:2019-06-04 16:45:37 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: c9cbd8684,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-c9cbd8684 2e187b74-86e8-11e9-83c6-06284416dbe9 0xc002acb690 0xc002acb691}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vjgcp {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vjgcp,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-vjgcp true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002acb6f0} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002acb710}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:45:37 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:45:37 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:45:37 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 16:45:37 +0000 UTC  }],Message:,Reason:,HostIP:172.31.9.156,PodIP:,StartTime:2019-06-04 16:45:37 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:45:37.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-5839" for this suite.
+Jun  4 16:45:43.761: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:45:44.220: INFO: namespace deployment-5839 deletion completed in 6.525119574s
+
+• [SLOW TEST:19.294 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
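+
+The behavior above is selected by the deployment strategy: with Recreate, every old pod is terminated before any new pod starts, unlike the default RollingUpdate. The relevant fragment:
+
+    spec:
+      strategy:
+        type: Recreate
+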
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:45:44.221: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name projected-secret-test-3227bf0f-86e8-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 16:45:44.459: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac" in namespace "projected-1550" to be "success or failure"
+Jun  4 16:45:44.464: INFO: Pod "pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.567364ms
+Jun  4 16:45:46.469: INFO: Pod "pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01043391s
+Jun  4 16:45:48.476: INFO: Pod "pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016684695s
+STEP: Saw pod success
+Jun  4 16:45:48.476: INFO: Pod "pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:45:48.480: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac container secret-volume-test: 
+STEP: delete the pod
+Jun  4 16:45:48.562: INFO: Waiting for pod pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:45:48.567: INFO: Pod pod-projected-secrets-3228cc84-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:45:48.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1550" for this suite.
+Jun  4 16:45:54.806: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:45:55.291: INFO: namespace projected-1550 deletion completed in 6.643480316s
+
+• [SLOW TEST:11.071 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
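+
+A projected volume combines several sources under one mount; the single-secret case exercised here reduces to roughly (the secret name is hypothetical):
+
+    volumes:
+    - name: projected-secret
+      projected:
+        sources:
+        - secret:
+            name: projected-secret-test
+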
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a read only busybox container 
+  should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:45:55.292: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:45:59.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-7286" for this suite.
+Jun  4 16:46:40.057: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:46:40.511: INFO: namespace kubelet-test-7286 deletion completed in 40.861372309s
+
+• [SLOW TEST:45.219 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a read only busybox container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:187
+    should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
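+
+The read-only root filesystem is requested via the container securityContext; any write outside a mounted volume then fails, which is what the test checks. A minimal sketch:
+
+    containers:
+    - name: busybox
+      image: busybox
+      command: ["sh", "-c", "touch /file; sleep 3600"]   # the touch should fail
+      securityContext:
+        readOnlyRootFilesystem: true
+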
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:46:40.511: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun  4 16:46:40.667: INFO: Waiting up to 5m0s for pod "pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-9861" to be "success or failure"
+Jun  4 16:46:40.676: INFO: Pod "pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.537642ms
+Jun  4 16:46:42.682: INFO: Pod "pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014655333s
+Jun  4 16:46:44.706: INFO: Pod "pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.038439885s
+STEP: Saw pod success
+Jun  4 16:46:44.706: INFO: Pod "pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:46:44.740: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 16:46:44.940: INFO: Waiting for pod pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:46:44.945: INFO: Pod pod-53b6798b-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:46:44.946: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-9861" for this suite.
+Jun  4 16:46:51.050: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:46:51.476: INFO: namespace emptydir-9861 deletion completed in 6.521294223s
+
+• [SLOW TEST:10.965 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
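+
+The (non-root,0777,tmpfs) case combines a memory-backed emptyDir with a non-root user; the 0777 mode itself is set and verified by the test binary on the mounted path, not by the volume spec. A sketch of the pod fragment:
+
+    spec:
+      securityContext:
+        runAsUser: 1001          # non-root
+      containers:
+      - name: test-container
+        image: busybox
+        volumeMounts:
+        - name: tmp
+          mountPath: /mnt/tmp
+      volumes:
+      - name: tmp
+        emptyDir:
+          medium: Memory         # tmpfs-backed
+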
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl label 
+  should update the label on a resource  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:46:51.476: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1108
+STEP: creating the pod
+Jun  4 16:46:51.540: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-4501'
+Jun  4 16:46:52.031: INFO: stderr: ""
+Jun  4 16:46:52.031: INFO: stdout: "pod/pause created\n"
+Jun  4 16:46:52.031: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause]
+Jun  4 16:46:52.031: INFO: Waiting up to 5m0s for pod "pause" in namespace "kubectl-4501" to be "running and ready"
+Jun  4 16:46:52.109: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 78.042518ms
+Jun  4 16:46:54.115: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 2.083433499s
+Jun  4 16:46:54.115: INFO: Pod "pause" satisfied condition "running and ready"
+Jun  4 16:46:54.115: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause]
+[It] should update the label on a resource  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: adding the label testing-label with value testing-label-value to a pod
+Jun  4 16:46:54.115: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 label pods pause testing-label=testing-label-value --namespace=kubectl-4501'
+Jun  4 16:46:54.203: INFO: stderr: ""
+Jun  4 16:46:54.203: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod has the label testing-label with the value testing-label-value
+Jun  4 16:46:54.203: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pod pause -L testing-label --namespace=kubectl-4501'
+Jun  4 16:46:54.283: INFO: stderr: ""
+Jun  4 16:46:54.283: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          2s    testing-label-value\n"
+STEP: removing the label testing-label of a pod
+Jun  4 16:46:54.283: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 label pods pause testing-label- --namespace=kubectl-4501'
+Jun  4 16:46:54.370: INFO: stderr: ""
+Jun  4 16:46:54.370: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod doesn't have the label testing-label
+Jun  4 16:46:54.370: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pod pause -L testing-label --namespace=kubectl-4501'
+Jun  4 16:46:54.452: INFO: stderr: ""
+Jun  4 16:46:54.452: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          2s    \n"
+[AfterEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1115
+STEP: using delete to clean up resources
+Jun  4 16:46:54.452: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-4501'
+Jun  4 16:46:54.545: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun  4 16:46:54.545: INFO: stdout: "pod \"pause\" force deleted\n"
+Jun  4 16:46:54.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=pause --no-headers --namespace=kubectl-4501'
+Jun  4 16:46:54.640: INFO: stderr: "No resources found.\n"
+Jun  4 16:46:54.640: INFO: stdout: ""
+Jun  4 16:46:54.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=pause --namespace=kubectl-4501 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:46:54.730: INFO: stderr: ""
+Jun  4 16:46:54.730: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:46:54.730: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-4501" for this suite.
+Jun  4 16:47:00.851: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:47:01.297: INFO: namespace kubectl-4501 deletion completed in 6.561344858s
+
+• [SLOW TEST:9.821 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl label
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should update the label on a resource  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:47:01.298: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:47:01.387: INFO: Waiting up to 5m0s for pod "downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac" in namespace "projected-5797" to be "success or failure"
+Jun  4 16:47:01.395: INFO: Pod "downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.731231ms
+Jun  4 16:47:03.441: INFO: Pod "downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.053869741s
+Jun  4 16:47:05.448: INFO: Pod "downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.061312495s
+STEP: Saw pod success
+Jun  4 16:47:05.448: INFO: Pod "downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:47:05.455: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:47:05.529: INFO: Waiting for pod downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:47:05.538: INFO: Pod downwardapi-volume-600ebf6d-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:47:05.538: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5797" for this suite.
+Jun  4 16:47:11.771: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:47:12.114: INFO: namespace projected-5797 deletion completed in 6.37160323s
+
+• [SLOW TEST:10.816 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run rc 
+  should create an rc from an image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:47:12.114: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1354
+[It] should create an rc from an image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  4 16:47:12.168: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=kubectl-6539'
+Jun  4 16:47:12.258: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun  4 16:47:12.258: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created
+STEP: confirm that you can get logs from an rc
+Jun  4 16:47:12.274: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-tfddl]
+Jun  4 16:47:12.274: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-tfddl" in namespace "kubectl-6539" to be "running and ready"
+Jun  4 16:47:12.280: INFO: Pod "e2e-test-nginx-rc-tfddl": Phase="Pending", Reason="", readiness=false. Elapsed: 5.882661ms
+Jun  4 16:47:14.285: INFO: Pod "e2e-test-nginx-rc-tfddl": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011214321s
+Jun  4 16:47:16.291: INFO: Pod "e2e-test-nginx-rc-tfddl": Phase="Running", Reason="", readiness=true. Elapsed: 4.016788717s
+Jun  4 16:47:16.291: INFO: Pod "e2e-test-nginx-rc-tfddl" satisfied condition "running and ready"
+Jun  4 16:47:16.291: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [e2e-test-nginx-rc-tfddl]
+Jun  4 16:47:16.291: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 logs rc/e2e-test-nginx-rc --namespace=kubectl-6539'
+Jun  4 16:47:16.851: INFO: stderr: ""
+Jun  4 16:47:16.851: INFO: stdout: ""
+[AfterEach] [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1359
+Jun  4 16:47:16.851: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete rc e2e-test-nginx-rc --namespace=kubectl-6539'
+Jun  4 16:47:16.985: INFO: stderr: ""
+Jun  4 16:47:16.985: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:47:16.985: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6539" for this suite.
+Jun  4 16:47:23.018: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:47:23.288: INFO: namespace kubectl-6539 deletion completed in 6.297264207s
+
+• [SLOW TEST:11.174 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run rc
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create an rc from an image  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:47:23.288: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:47:27.383: INFO: Waiting up to 5m0s for pod "client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac" in namespace "pods-6383" to be "success or failure"
+Jun  4 16:47:27.388: INFO: Pod "client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.28783ms
+Jun  4 16:47:29.394: INFO: Pod "client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011049414s
+STEP: Saw pod success
+Jun  4 16:47:29.394: INFO: Pod "client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:47:29.399: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac container env3cont: 
+STEP: delete the pod
+Jun  4 16:47:29.531: INFO: Waiting for pod client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:47:29.537: INFO: Pod client-envvars-6f900624-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:47:29.537: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-6383" for this suite.
+Jun  4 16:48:15.656: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:48:16.046: INFO: namespace pods-6383 deletion completed in 46.413761781s
+
+• [SLOW TEST:52.757 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:48:16.046: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-map-8ca1fa2c-86e8-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 16:48:16.164: INFO: Waiting up to 5m0s for pod "pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac" in namespace "secrets-8260" to be "success or failure"
+Jun  4 16:48:16.171: INFO: Pod "pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.200035ms
+Jun  4 16:48:18.176: INFO: Pod "pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012488977s
+STEP: Saw pod success
+Jun  4 16:48:18.176: INFO: Pod "pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:48:18.181: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac container secret-volume-test: 
+STEP: delete the pod
+Jun  4 16:48:18.299: INFO: Waiting for pod pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:48:18.303: INFO: Pod pod-secrets-8ca33e46-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:48:18.303: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-8260" for this suite.
+Jun  4 16:48:24.336: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:48:24.647: INFO: namespace secrets-8260 deletion completed in 6.337928547s
+
+• [SLOW TEST:8.601 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:48:24.647: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun  4 16:48:24.797: INFO: Number of nodes with available pods: 0
+Jun  4 16:48:24.797: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:25.829: INFO: Number of nodes with available pods: 0
+Jun  4 16:48:25.829: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:26.816: INFO: Number of nodes with available pods: 0
+Jun  4 16:48:26.816: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:27.836: INFO: Number of nodes with available pods: 1
+Jun  4 16:48:27.836: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:28.824: INFO: Number of nodes with available pods: 3
+Jun  4 16:48:28.824: INFO: Number of running nodes: 3, number of available pods: 3
+STEP: Stop a daemon pod, check that the daemon pod is revived.
+Jun  4 16:48:28.864: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:28.864: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:29.883: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:29.883: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:30.878: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:30.878: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:32.239: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:32.239: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:33.028: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:33.028: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:33.877: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:33.877: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:34.931: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:34.931: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:35.880: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:35.880: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:36.875: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:36.875: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:37.927: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:37.927: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:38.923: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:38.923: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:39.930: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:39.930: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:41.029: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:41.030: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:42.033: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:42.033: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:42.925: INFO: Number of nodes with available pods: 2
+Jun  4 16:48:42.925: INFO: Node ip-172-31-9-156.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:48:43.929: INFO: Number of nodes with available pods: 3
+Jun  4 16:48:43.929: INFO: Number of running nodes: 3, number of available pods: 3
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9903, will wait for the garbage collector to delete the pods
+Jun  4 16:48:44.001: INFO: Deleting DaemonSet.extensions daemon-set took: 13.065711ms
+Jun  4 16:48:44.601: INFO: Terminating DaemonSet.extensions daemon-set pods took: 600.180612ms
+Jun  4 16:48:57.806: INFO: Number of nodes with available pods: 0
+Jun  4 16:48:57.806: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  4 16:48:57.811: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9903/daemonsets","resourceVersion":"25087"},"items":null}
+
+Jun  4 16:48:57.819: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9903/pods","resourceVersion":"25087"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:48:57.841: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9903" for this suite.
+Jun  4 16:49:03.864: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:49:04.653: INFO: namespace daemonsets-9903 deletion completed in 6.807403395s
+
+• [SLOW TEST:40.006 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl patch 
+  should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:49:04.653: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating Redis RC
+Jun  4 16:49:04.755: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-8362'
+Jun  4 16:49:04.924: INFO: stderr: ""
+Jun  4 16:49:04.924: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+Jun  4 16:49:05.932: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:49:05.933: INFO: Found 0 / 1
+Jun  4 16:49:06.932: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:49:06.932: INFO: Found 0 / 1
+Jun  4 16:49:07.930: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:49:07.930: INFO: Found 1 / 1
+Jun  4 16:49:07.930: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+STEP: patching all pods
+Jun  4 16:49:07.934: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:49:07.934: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+Jun  4 16:49:07.934: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 patch pod redis-master-lhn7z --namespace=kubectl-8362 -p {"metadata":{"annotations":{"x":"y"}}}'
+Jun  4 16:49:08.028: INFO: stderr: ""
+Jun  4 16:49:08.028: INFO: stdout: "pod/redis-master-lhn7z patched\n"
+STEP: checking annotations
+Jun  4 16:49:08.034: INFO: Selector matched 1 pods for map[app:redis]
+Jun  4 16:49:08.034: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:49:08.034: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-8362" for this suite.
+Jun  4 16:49:30.111: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:49:30.787: INFO: namespace kubectl-8362 deletion completed in 22.744404412s
+
+• [SLOW TEST:26.133 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl patch
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should add annotations for pods in rc  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:49:30.787: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+Jun  4 16:49:30.882: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:49:36.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "init-container-8672" for this suite.
+Jun  4 16:49:42.717: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:49:43.158: INFO: namespace init-container-8672 deletion completed in 6.51381321s
+
+• [SLOW TEST:12.371 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should invoke init containers on a RestartNever pod [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:49:43.158: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on tmpfs
+Jun  4 16:49:43.264: INFO: Waiting up to 5m0s for pod "pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-1663" to be "success or failure"
+Jun  4 16:49:43.272: INFO: Pod "pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.993192ms
+Jun  4 16:49:45.278: INFO: Pod "pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013907932s
+Jun  4 16:49:47.284: INFO: Pod "pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019617883s
+STEP: Saw pod success
+Jun  4 16:49:47.284: INFO: Pod "pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:49:47.288: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 16:49:47.346: INFO: Waiting for pod pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:49:47.352: INFO: Pod pod-c08ce69b-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:49:47.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-1663" for this suite.
+Jun  4 16:49:53.380: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:49:53.790: INFO: namespace emptydir-1663 deletion completed in 6.432089798s
+
+• [SLOW TEST:10.632 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:49:53.790: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-8080
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  4 16:49:53.862: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  4 16:50:22.123: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 172.25.0.70 8081 | grep -v '^\s*$'] Namespace:pod-network-test-8080 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:50:22.123: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:50:23.914: INFO: Found all expected endpoints: [netserver-0]
+Jun  4 16:50:23.921: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 172.25.2.139 8081 | grep -v '^\s*$'] Namespace:pod-network-test-8080 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:50:23.921: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:50:25.771: INFO: Found all expected endpoints: [netserver-1]
+Jun  4 16:50:25.776: INFO: ExecWithOptions {Command:[/bin/sh -c echo hostName | nc -w 1 -u 172.25.3.54 8081 | grep -v '^\s*$'] Namespace:pod-network-test-8080 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:50:25.776: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:50:27.336: INFO: Found all expected endpoints: [netserver-2]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:50:27.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-8080" for this suite.
+Jun  4 16:50:51.363: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:50:51.921: INFO: namespace pod-network-test-8080 deletion completed in 24.578212763s
+
+• [SLOW TEST:58.131 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:50:51.922: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:50:51.981: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac" in namespace "projected-7937" to be "success or failure"
+Jun  4 16:50:52.024: INFO: Pod "downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 43.088914ms
+Jun  4 16:50:54.030: INFO: Pod "downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.049056675s
+Jun  4 16:50:56.038: INFO: Pod "downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.057496742s
+STEP: Saw pod success
+Jun  4 16:50:56.038: INFO: Pod "downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:50:56.045: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:50:56.098: INFO: Waiting for pod downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:50:56.110: INFO: Pod downwardapi-volume-e9828845-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:50:56.110: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7937" for this suite.
+Jun  4 16:51:02.231: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:51:02.745: INFO: namespace projected-7937 deletion completed in 6.618156506s
+
+• [SLOW TEST:10.823 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:51:02.745: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:51:02.855: INFO: Waiting up to 5m0s for pod "downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac" in namespace "projected-1561" to be "success or failure"
+Jun  4 16:51:02.866: INFO: Pod "downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 10.290347ms
+Jun  4 16:51:04.871: INFO: Pod "downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015373183s
+Jun  4 16:51:06.919: INFO: Pod "downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.063134532s
+STEP: Saw pod success
+Jun  4 16:51:06.919: INFO: Pod "downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:51:06.928: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:51:07.089: INFO: Waiting for pod downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:51:07.095: INFO: Pod downwardapi-volume-effe20fa-86e8-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:51:07.095: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-1561" for this suite.
+Jun  4 16:51:13.138: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:51:13.574: INFO: namespace projected-1561 deletion completed in 6.472128282s
+
+• [SLOW TEST:10.829 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command in a pod 
+  should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:51:13.574: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should print the output to logs [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:51:17.730: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-842" for this suite.
+Jun  4 16:52:03.920: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:52:04.323: INFO: namespace kubelet-test-842 deletion completed in 46.513735546s
+
+• [SLOW TEST:50.749 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox command in a pod
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40
+    should print the output to logs [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should scale a replication controller  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:52:04.324: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265
+[It] should scale a replication controller  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a replication controller
+Jun  4 16:52:04.408: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-3684'
+Jun  4 16:52:04.577: INFO: stderr: ""
+Jun  4 16:52:04.577: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun  4 16:52:04.577: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:04.669: INFO: stderr: ""
+Jun  4 16:52:04.669: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-rspqz "
+Jun  4 16:52:04.669: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:04.838: INFO: stderr: ""
+Jun  4 16:52:04.838: INFO: stdout: ""
+Jun  4 16:52:04.838: INFO: update-demo-nautilus-8tpjm is created but not running
+Jun  4 16:52:09.838: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:09.917: INFO: stderr: ""
+Jun  4 16:52:09.917: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-rspqz "
+Jun  4 16:52:09.917: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:09.992: INFO: stderr: ""
+Jun  4 16:52:09.992: INFO: stdout: "true"
+Jun  4 16:52:09.992: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:10.091: INFO: stderr: ""
+Jun  4 16:52:10.091: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:10.092: INFO: validating pod update-demo-nautilus-8tpjm
+Jun  4 16:52:10.233: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:10.233: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 16:52:10.233: INFO: update-demo-nautilus-8tpjm is verified up and running
+Jun  4 16:52:10.233: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-rspqz -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:10.317: INFO: stderr: ""
+Jun  4 16:52:10.317: INFO: stdout: "true"
+Jun  4 16:52:10.317: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-rspqz -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:10.394: INFO: stderr: ""
+Jun  4 16:52:10.394: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:10.394: INFO: validating pod update-demo-nautilus-rspqz
+Jun  4 16:52:10.495: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:10.495: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 16:52:10.495: INFO: update-demo-nautilus-rspqz is verified up and running
+STEP: scaling down the replication controller
+Jun  4 16:52:10.497: INFO: scanned /root for discovery docs: 
+Jun  4 16:52:10.497: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=kubectl-3684'
+Jun  4 16:52:11.613: INFO: stderr: ""
+Jun  4 16:52:11.613: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun  4 16:52:11.613: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:11.808: INFO: stderr: ""
+Jun  4 16:52:11.808: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-rspqz "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun  4 16:52:16.808: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:16.924: INFO: stderr: ""
+Jun  4 16:52:16.924: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-rspqz "
+STEP: Replicas for name=update-demo: expected=1 actual=2
+Jun  4 16:52:21.924: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:22.112: INFO: stderr: ""
+Jun  4 16:52:22.112: INFO: stdout: "update-demo-nautilus-8tpjm "
+Jun  4 16:52:22.112: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:22.249: INFO: stderr: ""
+Jun  4 16:52:22.249: INFO: stdout: "true"
+Jun  4 16:52:22.249: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:22.344: INFO: stderr: ""
+Jun  4 16:52:22.344: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:22.344: INFO: validating pod update-demo-nautilus-8tpjm
+Jun  4 16:52:22.418: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:22.418: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 16:52:22.418: INFO: update-demo-nautilus-8tpjm is verified up and running
+STEP: scaling up the replication controller
+Jun  4 16:52:22.420: INFO: scanned /root for discovery docs: 
+Jun  4 16:52:22.420: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=kubectl-3684'
+Jun  4 16:52:23.545: INFO: stderr: ""
+Jun  4 16:52:23.545: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun  4 16:52:23.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:23.720: INFO: stderr: ""
+Jun  4 16:52:23.722: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-n2qlg "
+Jun  4 16:52:23.722: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:23.825: INFO: stderr: ""
+Jun  4 16:52:23.825: INFO: stdout: "true"
+Jun  4 16:52:23.825: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:23.933: INFO: stderr: ""
+Jun  4 16:52:23.933: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:23.933: INFO: validating pod update-demo-nautilus-8tpjm
+Jun  4 16:52:23.943: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:23.943: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 16:52:23.943: INFO: update-demo-nautilus-8tpjm is verified up and running
+Jun  4 16:52:23.943: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-n2qlg -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:24.240: INFO: stderr: ""
+Jun  4 16:52:24.240: INFO: stdout: ""
+Jun  4 16:52:24.240: INFO: update-demo-nautilus-n2qlg is created but not running
+Jun  4 16:52:29.240: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-3684'
+Jun  4 16:52:29.325: INFO: stderr: ""
+Jun  4 16:52:29.325: INFO: stdout: "update-demo-nautilus-8tpjm update-demo-nautilus-n2qlg "
+Jun  4 16:52:29.325: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:29.399: INFO: stderr: ""
+Jun  4 16:52:29.399: INFO: stdout: "true"
+Jun  4 16:52:29.399: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-8tpjm -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:29.479: INFO: stderr: ""
+Jun  4 16:52:29.479: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:29.479: INFO: validating pod update-demo-nautilus-8tpjm
+Jun  4 16:52:29.513: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:29.513: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 16:52:29.513: INFO: update-demo-nautilus-8tpjm is verified up and running
+Jun  4 16:52:29.513: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-n2qlg -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:29.597: INFO: stderr: ""
+Jun  4 16:52:29.597: INFO: stdout: "true"
+Jun  4 16:52:29.597: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-n2qlg -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-3684'
+Jun  4 16:52:29.709: INFO: stderr: ""
+Jun  4 16:52:29.709: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 16:52:29.710: INFO: validating pod update-demo-nautilus-n2qlg
+Jun  4 16:52:29.904: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 16:52:29.904: INFO: Unmarshalled json jpg/img => {nautilus.jpg}, expecting nautilus.jpg.
+Jun  4 16:52:29.904: INFO: update-demo-nautilus-n2qlg is verified up and running
+STEP: using delete to clean up resources
+Jun  4 16:52:29.904: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-3684'
+Jun  4 16:52:30.133: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun  4 16:52:30.133: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Jun  4 16:52:30.133: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3684'
+Jun  4 16:52:30.240: INFO: stderr: "No resources found.\n"
+Jun  4 16:52:30.240: INFO: stdout: ""
+Jun  4 16:52:30.240: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-3684 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:52:30.347: INFO: stderr: ""
+Jun  4 16:52:30.347: INFO: stdout: "update-demo-nautilus-8tpjm\nupdate-demo-nautilus-n2qlg\n"
+Jun  4 16:52:30.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3684'
+Jun  4 16:52:30.943: INFO: stderr: "No resources found.\n"
+Jun  4 16:52:30.943: INFO: stdout: ""
+Jun  4 16:52:30.943: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-3684 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:52:31.138: INFO: stderr: ""
+Jun  4 16:52:31.138: INFO: stdout: "update-demo-nautilus-8tpjm\nupdate-demo-nautilus-n2qlg\n"
+Jun  4 16:52:31.347: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3684'
+Jun  4 16:52:31.465: INFO: stderr: "No resources found.\n"
+Jun  4 16:52:31.465: INFO: stdout: ""
+Jun  4 16:52:31.465: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-3684 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:52:31.553: INFO: stderr: ""
+Jun  4 16:52:31.553: INFO: stdout: "update-demo-nautilus-8tpjm\nupdate-demo-nautilus-n2qlg\n"
+Jun  4 16:52:31.847: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3684'
+Jun  4 16:52:32.026: INFO: stderr: "No resources found.\n"
+Jun  4 16:52:32.026: INFO: stdout: ""
+Jun  4 16:52:32.026: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-3684 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:52:32.107: INFO: stderr: ""
+Jun  4 16:52:32.107: INFO: stdout: "update-demo-nautilus-8tpjm\nupdate-demo-nautilus-n2qlg\n"
+Jun  4 16:52:32.347: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-3684'
+Jun  4 16:52:32.707: INFO: stderr: "No resources found.\n"
+Jun  4 16:52:32.707: INFO: stdout: ""
+Jun  4 16:52:32.707: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-3684 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 16:52:32.820: INFO: stderr: ""
+Jun  4 16:52:32.820: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:52:32.820: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-3684" for this suite.
+Jun  4 16:52:38.904: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:52:39.250: INFO: namespace kubectl-3684 deletion completed in 6.42258133s
+
+• [SLOW TEST:34.926 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Update Demo
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should scale a replication controller  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:52:39.250: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-http in namespace container-probe-4712
+Jun  4 16:52:43.319: INFO: Started pod liveness-http in namespace container-probe-4712
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  4 16:52:43.324: INFO: Initial restart count of pod liveness-http is 0
+Jun  4 16:53:01.634: INFO: Restart count of pod container-probe-4712/liveness-http is now 1 (18.310687827s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:53:01.731: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-4712" for this suite.
+Jun  4 16:53:07.773: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:53:08.239: INFO: namespace container-probe-4712 deletion completed in 6.493299916s
+
+• [SLOW TEST:28.989 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases 
+  should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:53:08.240: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:53:12.530: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-9562" for this suite.
+Jun  4 16:53:50.611: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:53:51.332: INFO: namespace kubelet-test-9562 deletion completed in 38.795586006s
+
+• [SLOW TEST:43.092 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox Pod with hostAliases
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136
+    should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-network] DNS 
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:53:51.332: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-692.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.dns-692.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-692.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do test -n "$$(getent hosts dns-querier-1.dns-test-service.dns-692.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.dns-692.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".dns-692.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe /etc/hosts
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+Jun  4 16:53:56.395: INFO: DNS probes using dns-692/dns-test-547e2745-86e9-11e9-a2b6-96b18e3e6fac succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:53:56.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "dns-692" for this suite.
+Jun  4 16:54:02.439: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:54:02.811: INFO: namespace dns-692 deletion completed in 6.390917285s
+
+• [SLOW TEST:11.479 seconds]
+[sig-network] DNS
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide /etc/hosts entries for the cluster [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:54:02.811: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:54:02.861: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac" in namespace "projected-3999" to be "success or failure"
+Jun  4 16:54:02.866: INFO: Pod "downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.293244ms
+Jun  4 16:54:04.873: INFO: Pod "downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011815391s
+Jun  4 16:54:06.882: INFO: Pod "downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021061499s
+STEP: Saw pod success
+Jun  4 16:54:06.882: INFO: Pod "downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:54:06.888: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:54:07.054: INFO: Waiting for pod downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:54:07.098: INFO: Pod downwardapi-volume-5b4945cc-86e9-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:54:07.098: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3999" for this suite.
+Jun  4 16:54:13.305: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:54:13.592: INFO: namespace projected-3999 deletion completed in 6.477586256s
+
+• [SLOW TEST:10.781 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] KubeletManagedEtcHosts 
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:54:13.592: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Setting up the test
+STEP: Creating hostNetwork=false pod
+STEP: Creating hostNetwork=true pod
+STEP: Running the test
+STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false
+Jun  4 16:54:21.719: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:21.719: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:22.407: INFO: Exec stderr: ""
+Jun  4 16:54:22.407: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:22.407: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:23.199: INFO: Exec stderr: ""
+Jun  4 16:54:23.200: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:23.200: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:23.857: INFO: Exec stderr: ""
+Jun  4 16:54:23.857: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:23.857: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:24.563: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount
+Jun  4 16:54:24.563: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:24.563: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:25.276: INFO: Exec stderr: ""
+Jun  4 16:54:25.276: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:25.276: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:26.218: INFO: Exec stderr: ""
+STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true
+Jun  4 16:54:26.218: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:26.218: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:26.923: INFO: Exec stderr: ""
+Jun  4 16:54:26.923: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:26.923: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:27.830: INFO: Exec stderr: ""
+Jun  4 16:54:27.830: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:27.830: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:28.729: INFO: Exec stderr: ""
+Jun  4 16:54:28.729: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-kubelet-etc-hosts-1974 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:54:28.729: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:54:29.561: INFO: Exec stderr: ""
+[AfterEach] [k8s.io] KubeletManagedEtcHosts
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:54:29.561: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-kubelet-etc-hosts-1974" for this suite.
+Jun  4 16:55:21.634: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:55:22.637: INFO: namespace e2e-kubelet-etc-hosts-1974 deletion completed in 53.029104887s
+
+• [SLOW TEST:69.045 seconds]
+[k8s.io] KubeletManagedEtcHosts
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:55:22.638: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:55:22.762: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac" in namespace "projected-5735" to be "success or failure"
+Jun  4 16:55:22.769: INFO: Pod "downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.387988ms
+Jun  4 16:55:24.775: INFO: Pod "downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012141601s
+Jun  4 16:55:26.819: INFO: Pod "downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.056415899s
+STEP: Saw pod success
+Jun  4 16:55:26.819: INFO: Pod "downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:55:26.824: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:55:26.970: INFO: Waiting for pod downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:55:26.981: INFO: Pod downwardapi-volume-8ae8bb40-86e9-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:55:26.981: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5735" for this suite.
+Jun  4 16:55:33.026: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:55:33.616: INFO: namespace projected-5735 deletion completed in 6.620302639s
+
+• [SLOW TEST:10.978 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl api-versions 
+  should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:55:33.617: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: validating api versions
+Jun  4 16:55:33.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 api-versions'
+Jun  4 16:55:33.780: INFO: stderr: ""
+Jun  4 16:55:33.780: INFO: stdout: "admissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncluster.k8s.io/v1alpha1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\ncrd.projectcalico.org/v1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nmetrics.k8s.io/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:55:33.780: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9757" for this suite.
+Jun  4 16:55:39.802: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:55:40.543: INFO: namespace kubectl-9757 deletion completed in 6.756419439s
+
+• [SLOW TEST:6.926 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl api-versions
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should check if v1 is in available api versions  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-network] Services 
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:55:40.543: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86
+[It] should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating service endpoint-test2 in namespace services-7511
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7511 to expose endpoints map[]
+Jun  4 16:55:40.667: INFO: successfully validated that service endpoint-test2 in namespace services-7511 exposes endpoints map[] (6.328063ms elapsed)
+STEP: Creating pod pod1 in namespace services-7511
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7511 to expose endpoints map[pod1:[80]]
+Jun  4 16:55:43.732: INFO: successfully validated that service endpoint-test2 in namespace services-7511 exposes endpoints map[pod1:[80]] (3.054068285s elapsed)
+STEP: Creating pod pod2 in namespace services-7511
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7511 to expose endpoints map[pod1:[80] pod2:[80]]
+Jun  4 16:55:47.129: INFO: successfully validated that service endpoint-test2 in namespace services-7511 exposes endpoints map[pod1:[80] pod2:[80]] (3.307263802s elapsed)
+STEP: Deleting pod pod1 in namespace services-7511
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7511 to expose endpoints map[pod2:[80]]
+Jun  4 16:55:47.160: INFO: successfully validated that service endpoint-test2 in namespace services-7511 exposes endpoints map[pod2:[80]] (14.724837ms elapsed)
+STEP: Deleting pod pod2 in namespace services-7511
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace services-7511 to expose endpoints map[]
+Jun  4 16:55:47.186: INFO: successfully validated that service endpoint-test2 in namespace services-7511 exposes endpoints map[] (5.958408ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:55:47.208: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-7511" for this suite.
+Jun  4 16:55:53.232: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:55:53.697: INFO: namespace services-7511 deletion completed in 6.483355579s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:13.154 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:55:53.697: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with projected pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod pod-subpath-test-projected-h2k8
+STEP: Creating a pod to test atomic-volume-subpath
+Jun  4 16:55:54.139: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-h2k8" in namespace "subpath-821" to be "success or failure"
+Jun  4 16:55:54.151: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Pending", Reason="", readiness=false. Elapsed: 11.875859ms
+Jun  4 16:55:56.157: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 2.017943378s
+Jun  4 16:55:58.165: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 4.025569641s
+Jun  4 16:56:00.170: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 6.031349513s
+Jun  4 16:56:02.297: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 8.157775099s
+Jun  4 16:56:04.323: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 10.183477826s
+Jun  4 16:56:06.329: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 12.190218552s
+Jun  4 16:56:08.420: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 14.280695243s
+Jun  4 16:56:10.428: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 16.288427236s
+Jun  4 16:56:12.433: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 18.294137917s
+Jun  4 16:56:14.439: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Running", Reason="", readiness=true. Elapsed: 20.299557253s
+Jun  4 16:56:16.444: INFO: Pod "pod-subpath-test-projected-h2k8": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.304541435s
+STEP: Saw pod success
+Jun  4 16:56:16.444: INFO: Pod "pod-subpath-test-projected-h2k8" satisfied condition "success or failure"
+Jun  4 16:56:16.448: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-subpath-test-projected-h2k8 container test-container-subpath-projected-h2k8: 
+STEP: delete the pod
+Jun  4 16:56:16.490: INFO: Waiting for pod pod-subpath-test-projected-h2k8 to disappear
+Jun  4 16:56:16.494: INFO: Pod pod-subpath-test-projected-h2k8 no longer exists
+STEP: Deleting pod pod-subpath-test-projected-h2k8
+Jun  4 16:56:16.494: INFO: Deleting pod "pod-subpath-test-projected-h2k8" in namespace "subpath-821"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:56:16.500: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-821" for this suite.
+Jun  4 16:56:22.527: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:56:22.905: INFO: namespace subpath-821 deletion completed in 6.400007805s
+
+• [SLOW TEST:29.208 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with projected pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Container Runtime blackbox test when starting a container that exits 
+  should run with the expected status [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:56:22.906: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-runtime
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should run with the expected status [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpa': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance]
+STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpof': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance]
+STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount'
+STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase'
+STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition
+STEP: Container 'terminate-cmd-rpn': should get the expected 'State'
+STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance]
+[AfterEach] [k8s.io] Container Runtime
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:56:51.849: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-runtime-6700" for this suite.
+Jun  4 16:56:57.880: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:56:58.505: INFO: namespace container-runtime-6700 deletion completed in 6.650112532s
+
+• [SLOW TEST:35.599 seconds]
+[k8s.io] Container Runtime
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  blackbox test
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:37
+    when starting a container that exits
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38
+      should run with the expected status [NodeConformance] [Conformance]
+      /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-apps] ReplicaSet 
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:56:58.505: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename replicaset
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Given a Pod with a 'name' label pod-adoption-release is created
+STEP: When a replicaset with a matching selector is created
+STEP: Then the orphan pod is adopted
+STEP: When the matched label of one of its pods change
+Jun  4 16:57:03.685: INFO: Pod name pod-adoption-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:57:03.706: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replicaset-5842" for this suite.
+Jun  4 16:57:25.794: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:57:26.278: INFO: namespace replicaset-5842 deletion completed in 22.555924578s
+
+• [SLOW TEST:27.773 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should adopt matching pods on creation and release no longer matching pods [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:57:26.278: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-2706
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  4 16:57:26.373: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  4 16:57:53.403: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.156:8080/dial?request=hostName&protocol=udp&host=172.25.3.61&port=8081&tries=1'] Namespace:pod-network-test-2706 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:57:53.403: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:57:54.798: INFO: Waiting for endpoints: map[]
+Jun  4 16:57:54.896: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.156:8080/dial?request=hostName&protocol=udp&host=172.25.2.155&port=8081&tries=1'] Namespace:pod-network-test-2706 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:57:54.896: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:57:55.798: INFO: Waiting for endpoints: map[]
+Jun  4 16:57:55.803: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.156:8080/dial?request=hostName&protocol=udp&host=172.25.0.71&port=8081&tries=1'] Namespace:pod-network-test-2706 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 16:57:55.803: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 16:57:58.391: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:57:58.391: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-2706" for this suite.
+Jun  4 16:58:22.798: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:58:24.023: INFO: namespace pod-network-test-2706 deletion completed in 25.399171965s
+
+• [SLOW TEST:57.745 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-storage] HostPath 
+  should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:58:24.023: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename hostpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37
+[It] should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test hostPath mode
+Jun  4 16:58:24.113: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "hostpath-8276" to be "success or failure"
+Jun  4 16:58:24.118: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 5.191148ms
+Jun  4 16:58:26.133: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020320552s
+STEP: Saw pod success
+Jun  4 16:58:26.133: INFO: Pod "pod-host-path-test" satisfied condition "success or failure"
+Jun  4 16:58:26.154: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-host-path-test container test-container-1: 
+STEP: delete the pod
+Jun  4 16:58:26.447: INFO: Waiting for pod pod-host-path-test to disappear
+Jun  4 16:58:26.451: INFO: Pod pod-host-path-test no longer exists
+[AfterEach] [sig-storage] HostPath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:58:26.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "hostpath-8276" for this suite.
+Jun  4 16:58:32.477: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:58:33.600: INFO: namespace hostpath-8276 deletion completed in 7.141370111s
+
+• [SLOW TEST:9.577 seconds]
+[sig-storage] HostPath
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34
+  should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:58:33.601: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 16:58:33.990: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-534" to be "success or failure"
+Jun  4 16:58:34.291: INFO: Pod "downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 300.578068ms
+Jun  4 16:58:36.296: INFO: Pod "downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.306211841s
+Jun  4 16:58:38.395: INFO: Pod "downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.404657905s
+STEP: Saw pod success
+Jun  4 16:58:38.395: INFO: Pod "downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:58:38.419: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 16:58:38.617: INFO: Waiting for pod downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:58:38.622: INFO: Pod downwardapi-volume-fcd717b4-86e9-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:58:38.622: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-534" for this suite.
+Jun  4 16:58:44.694: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:58:45.405: INFO: namespace downward-api-534 deletion completed in 6.772651388s
+
+• [SLOW TEST:11.804 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:58:45.405: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on node default medium
+Jun  4 16:58:45.480: INFO: Waiting up to 5m0s for pod "pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-6729" to be "success or failure"
+Jun  4 16:58:45.487: INFO: Pod "pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.277194ms
+Jun  4 16:58:47.495: INFO: Pod "pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014558083s
+Jun  4 16:58:49.501: INFO: Pod "pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020945029s
+STEP: Saw pod success
+Jun  4 16:58:49.501: INFO: Pod "pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 16:58:49.506: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 16:58:55.237: INFO: Waiting for pod pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 16:58:55.241: INFO: Pod pod-03bcbd0a-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 16:58:55.241: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-6729" for this suite.
+Jun  4 16:59:01.274: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 16:59:01.789: INFO: namespace emptydir-6729 deletion completed in 6.541516262s
+
+• [SLOW TEST:16.384 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
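+The emptydir test above ("root,0644,default") writes a file into an emptyDir on the node's default medium and checks its mode and content. A minimal hand-written equivalent (illustrative names; the real test uses its own mounttest image):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-0644-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "echo hi > /test-volume/f && chmod 0644 /test-volume/f && stat -c '%a' /test-volume/f"]
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir: {}   # empty spec = "default" medium, i.e. node-local disk rather than tmpfs
+```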
+[sig-apps] Daemon set [Serial] 
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 16:59:01.790: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 16:59:02.190: INFO: Create a RollingUpdate DaemonSet
+Jun  4 16:59:02.202: INFO: Check that daemon pods launch on every node of the cluster
+Jun  4 16:59:02.226: INFO: Number of nodes with available pods: 0
+Jun  4 16:59:02.226: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:59:03.243: INFO: Number of nodes with available pods: 0
+Jun  4 16:59:03.243: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:59:04.896: INFO: Number of nodes with available pods: 2
+Jun  4 16:59:04.896: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 16:59:05.590: INFO: Number of nodes with available pods: 3
+Jun  4 16:59:05.590: INFO: Number of running nodes: 3, number of available pods: 3
+Jun  4 16:59:05.590: INFO: Update the DaemonSet to trigger a rollout
+Jun  4 16:59:05.706: INFO: Updating DaemonSet daemon-set
+Jun  4 16:59:16.904: INFO: Roll back the DaemonSet before rollout is complete
+Jun  4 16:59:16.916: INFO: Updating DaemonSet daemon-set
+Jun  4 16:59:16.916: INFO: Make sure DaemonSet rollback is complete
+Jun  4 16:59:16.926: INFO: Wrong image for pod: daemon-set-725ch. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun  4 16:59:16.926: INFO: Pod daemon-set-725ch is not available
+Jun  4 16:59:17.940: INFO: Wrong image for pod: daemon-set-725ch. Expected: docker.io/library/nginx:1.14-alpine, got: foo:non-existent.
+Jun  4 16:59:17.940: INFO: Pod daemon-set-725ch is not available
+Jun  4 16:59:18.941: INFO: Pod daemon-set-jrn9j is not available
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-9638, will wait for the garbage collector to delete the pods
+Jun  4 16:59:19.189: INFO: Deleting DaemonSet.extensions daemon-set took: 102.746107ms
+Jun  4 16:59:19.689: INFO: Terminating DaemonSet.extensions daemon-set pods took: 500.193173ms
+Jun  4 17:00:10.001: INFO: Number of nodes with available pods: 0
+Jun  4 17:00:10.001: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  4 17:00:10.009: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-9638/daemonsets","resourceVersion":"27821"},"items":null}
+
+Jun  4 17:00:10.018: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-9638/pods","resourceVersion":"27821"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:00:10.105: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-9638" for this suite.
+Jun  4 17:00:16.137: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:00:16.770: INFO: namespace daemonsets-9638 deletion completed in 6.654483338s
+
+• [SLOW TEST:74.980 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should rollback without unnecessary restarts [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
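+The rollback sequence the DaemonSet test drives through the API can be reproduced with kubectl; a sketch, assuming a DaemonSet named daemon-set whose container is called app (the container name is illustrative):
+
+```
+$ kubectl set image daemonset/daemon-set app=foo:non-existent   # start a rollout that can never complete
+$ kubectl rollout undo daemonset/daemon-set                     # roll back before the rollout finishes
+$ kubectl rollout status daemonset/daemon-set                   # healthy pods should not have been restarted
+```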
+[sig-apps] Deployment 
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:00:16.770: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:00:16.844: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted)
+Jun  4 17:00:16.873: INFO: Pod name sample-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun  4 17:00:20.891: INFO: Creating deployment "test-rolling-update-deployment"
+Jun  4 17:00:20.900: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has
+Jun  4 17:00:20.918: INFO: deployment "test-rolling-update-deployment" doesn't have the required revision set
+Jun  4 17:00:22.930: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected
+Jun  4 17:00:22.934: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:2, UpdatedReplicas:1, ReadyReplicas:1, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695264420, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695264420, loc:(*time.Location)(0x8a060e0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63695264420, loc:(*time.Location)(0x8a060e0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63695264420, loc:(*time.Location)(0x8a060e0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rolling-update-deployment-67599b4d9\" is progressing."}}, CollisionCount:(*int32)(nil)}
+Jun  4 17:00:24.939: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted)
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  4 17:00:24.954: INFO: Deployment "test-rolling-update-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:deployment-3917,SelfLink:/apis/apps/v1/namespaces/deployment-3917/deployments/test-rolling-update-deployment,UID:3c9ffa47-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27935,Generation:1,CreationTimestamp:2019-06-04 17:00:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-04 17:00:20 +0000 UTC 2019-06-04 17:00:20 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-04 17:00:23 +0000 UTC 2019-06-04 17:00:20 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-67599b4d9" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun  4 17:00:25.039: INFO: New ReplicaSet "test-rolling-update-deployment-67599b4d9" of Deployment "test-rolling-update-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-67599b4d9,GenerateName:,Namespace:deployment-3917,SelfLink:/apis/apps/v1/namespaces/deployment-3917/replicasets/test-rolling-update-deployment-67599b4d9,UID:3ca251ad-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27925,Generation:1,CreationTimestamp:2019-06-04 17:00:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 3c9ffa47-86ea-11e9-83c6-06284416dbe9 0xc001a61ba0 0xc001a61ba1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun  4 17:00:25.039: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment":
+Jun  4 17:00:25.039: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:deployment-3917,SelfLink:/apis/apps/v1/namespaces/deployment-3917/replicasets/test-rolling-update-controller,UID:3a3651af-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27934,Generation:2,CreationTimestamp:2019-06-04 17:00:16 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 3c9ffa47-86ea-11e9-83c6-06284416dbe9 0xc001a61ad7 0xc001a61ad8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+Jun  4 17:00:25.090: INFO: Pod "test-rolling-update-deployment-67599b4d9-rxrtd" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-67599b4d9-rxrtd,GenerateName:test-rolling-update-deployment-67599b4d9-,Namespace:deployment-3917,SelfLink:/api/v1/namespaces/deployment-3917/pods/test-rolling-update-deployment-67599b4d9-rxrtd,UID:3ca32e3a-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27924,Generation:0,CreationTimestamp:2019-06-04 17:00:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 67599b4d9,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-67599b4d9 3ca251ad-86ea-11e9-83c6-06284416dbe9 0xc0029224c0 0xc0029224c1}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-dq7bd {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-dq7bd,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-dq7bd true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc002922520} {node.kubernetes.io/unreachable Exists  NoExecute 0xc002922540}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:00:21 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:00:22 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:00:22 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:00:20 +0000 UTC  }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.162,StartTime:2019-06-04 17:00:21 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-04 17:00:22 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://a1a4b5fe34aa9857e8bba7a21a32c2afc40dc912ca853160a854f032a9806501}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:00:25.090: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-3917" for this suite.
+Jun  4 17:00:31.116: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:00:32.013: INFO: namespace deployment-3917 deletion completed in 6.913781828s
+
+• [SLOW TEST:15.243 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
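+What the Deployment test checks — a RollingUpdate strategy scaling the old ReplicaSet down while the new one comes up — can also be observed by hand. A sketch with illustrative names, reusing the images that appear in the object dump above:
+
+```
+$ kubectl create deployment test-rolling --image=docker.io/library/nginx:1.14-alpine
+$ kubectl set image deployment/test-rolling nginx=gcr.io/kubernetes-e2e-test-images/redis:1.0
+$ kubectl rollout status deployment/test-rolling
+$ kubectl get replicasets   # the old ReplicaSet ends at 0 replicas, the new one owns the pods
+```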
+[sig-api-machinery] Watchers 
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:00:32.014: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps with a certain label
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: changing the label value of the configmap
+STEP: Expecting to observe a delete notification for the watched object
+Jun  4 17:00:32.341: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27985,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  4 17:00:32.341: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27986,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun  4 17:00:32.342: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:27987,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time
+STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements
+STEP: changing the label value of the configmap back
+STEP: modifying the configmap a third time
+STEP: deleting the configmap
+STEP: Expecting to observe an add notification for the watched object when the label value was restored
+Jun  4 17:00:42.590: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:28013,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  4 17:00:42.590: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:28014,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+Jun  4 17:00:42.590: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:watch-8330,SelfLink:/api/v1/namespaces/watch-8330/configmaps/e2e-watch-test-label-changed,UID:436d3e2e-86ea-11e9-83c6-06284416dbe9,ResourceVersion:28015,Generation:0,CreationTimestamp:2019-06-04 17:00:32 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:00:42.590: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-8330" for this suite.
+Jun  4 17:00:48.618: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:00:49.791: INFO: namespace watch-8330 deletion completed in 7.191953791s
+
+• [SLOW TEST:17.777 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
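+The watch semantics this test verifies are easy to see with kubectl: a watch filtered by a label selector reports DELETED when an object is relabeled out of the selector and ADDED when it is relabeled back in. A sketch (configmap name and label value are illustrative):
+
+```
+$ kubectl create configmap watch-demo
+$ kubectl label configmap watch-demo watch-this-configmap=label-changed-and-restored
+$ kubectl get configmaps -l watch-this-configmap=label-changed-and-restored --watch &
+$ kubectl label configmap watch-demo watch-this-configmap=other --overwrite     # watch prints DELETED
+$ kubectl label configmap watch-demo watch-this-configmap=label-changed-and-restored --overwrite   # watch prints ADDED
+```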
+[sig-storage] Downward API volume 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:00:49.791: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 17:00:50.012: INFO: Waiting up to 5m0s for pod "downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-3014" to be "success or failure"
+Jun  4 17:00:50.018: INFO: Pod "downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.482622ms
+Jun  4 17:00:52.190: INFO: Pod "downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.177687646s
+Jun  4 17:00:54.223: INFO: Pod "downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.211331138s
+STEP: Saw pod success
+Jun  4 17:00:54.223: INFO: Pod "downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:00:54.228: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 17:00:54.404: INFO: Waiting for pod downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:00:54.413: INFO: Pod downwardapi-volume-4df6913f-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:00:54.413: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-3014" for this suite.
+Jun  4 17:01:00.493: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:01:01.646: INFO: namespace downward-api-3014 deletion completed in 7.224309806s
+
+• [SLOW TEST:11.854 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
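+The memory-request test reads the value back through a downwardAPI volume item with a resourceFieldRef. A minimal manifest of the same shape (names and the 32Mi request are illustrative):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downwardapi-memreq-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["sh", "-c", "cat /etc/podinfo/mem_request"]
+    resources:
+      requests:
+        memory: 32Mi
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    downwardAPI:
+      items:
+      - path: mem_request
+        resourceFieldRef:
+          containerName: client-container
+          resource: requests.memory   # projected in bytes, e.g. 33554432 for 32Mi
+```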
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with downward pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:01:01.647: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with downward pod [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod pod-subpath-test-downwardapi-s72w
+STEP: Creating a pod to test atomic-volume-subpath
+Jun  4 17:01:01.725: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-s72w" in namespace "subpath-728" to be "success or failure"
+Jun  4 17:01:01.734: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Pending", Reason="", readiness=false. Elapsed: 8.200917ms
+Jun  4 17:01:03.742: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Pending", Reason="", readiness=false. Elapsed: 2.016870189s
+Jun  4 17:01:05.750: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 4.024317461s
+Jun  4 17:01:07.793: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 6.06729411s
+Jun  4 17:01:09.799: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 8.073870549s
+Jun  4 17:01:11.806: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 10.080373325s
+Jun  4 17:01:13.812: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 12.086296519s
+Jun  4 17:01:15.892: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 14.166738239s
+Jun  4 17:01:17.898: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 16.172671312s
+Jun  4 17:01:19.923: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 18.197867535s
+Jun  4 17:01:21.932: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Running", Reason="", readiness=true. Elapsed: 20.206274795s
+Jun  4 17:01:23.939: INFO: Pod "pod-subpath-test-downwardapi-s72w": Phase="Succeeded", Reason="", readiness=false. Elapsed: 22.213236148s
+STEP: Saw pod success
+Jun  4 17:01:23.939: INFO: Pod "pod-subpath-test-downwardapi-s72w" satisfied condition "success or failure"
+Jun  4 17:01:23.943: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-subpath-test-downwardapi-s72w container test-container-subpath-downwardapi-s72w: 
+STEP: delete the pod
+Jun  4 17:01:23.988: INFO: Waiting for pod pod-subpath-test-downwardapi-s72w to disappear
+Jun  4 17:01:23.999: INFO: Pod pod-subpath-test-downwardapi-s72w no longer exists
+STEP: Deleting pod pod-subpath-test-downwardapi-s72w
+Jun  4 17:01:23.999: INFO: Deleting pod "pod-subpath-test-downwardapi-s72w" in namespace "subpath-728"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:01:24.005: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-728" for this suite.
+Jun  4 17:01:30.057: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:01:31.399: INFO: namespace subpath-728 deletion completed in 7.36367943s
+
+• [SLOW TEST:29.752 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with downward pod [LinuxOnly] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
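+The subpath test mounts one item of an atomically-updated volume via subPath and verifies it stays consistent across updates. The mount shape, reduced to a sketch (paths and names are illustrative, and this omits the test's polling container):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: subpath-downward-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "cat /probe-volume/podname"]
+    volumeMounts:
+    - name: downward
+      mountPath: /probe-volume/podname
+      subPath: podname            # mount a single file out of the volume
+  volumes:
+  - name: downward
+    downwardAPI:
+      items:
+      - path: podname
+        fieldRef:
+          fieldPath: metadata.name
+```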
+[sig-storage] Secrets 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:01:31.400: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name s-test-opt-del-66d69d53-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Creating secret with name s-test-opt-upd-66d69d96-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-66d69d53-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Updating secret s-test-opt-upd-66d69d96-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Creating secret with name s-test-opt-create-66d69db2-86ea-11e9-a2b6-96b18e3e6fac
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:02:45.008: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-4727" for this suite.
+Jun  4 17:03:07.105: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:03:07.594: INFO: namespace secrets-4727 deletion completed in 22.579252724s
+
+• [SLOW TEST:96.194 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
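+The optional-secret behavior exercised above comes down to `optional: true` on the volume source: the pod starts even if the secret is absent, and the kubelet projects (or removes) the keys as the secret is created, updated, or deleted. A minimal sketch (secret and key names are illustrative):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: optional-secret-demo
+spec:
+  containers:
+  - name: creates-volume-test
+    image: busybox
+    command: ["sh", "-c", "while true; do cat /etc/secret-volume/data-1 2>/dev/null || echo absent; sleep 5; done"]
+    volumeMounts:
+    - name: secret-volume
+      mountPath: /etc/secret-volume
+  volumes:
+  - name: secret-volume
+    secret:
+      secretName: s-test-opt-create
+      optional: true   # do not block pod startup on the secret existing
+```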
+[sig-network] Networking Granular Checks: Pods 
+  should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:03:07.595: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-9207
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  4 17:03:07.697: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  4 17:03:27.937: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.166:8080/dial?request=hostName&protocol=http&host=172.25.3.64&port=8080&tries=1'] Namespace:pod-network-test-9207 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:03:27.937: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:03:28.600: INFO: Waiting for endpoints: map[]
+Jun  4 17:03:28.622: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.166:8080/dial?request=hostName&protocol=http&host=172.25.2.165&port=8080&tries=1'] Namespace:pod-network-test-9207 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:03:28.622: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:03:29.181: INFO: Waiting for endpoints: map[]
+Jun  4 17:03:29.186: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://172.25.2.166:8080/dial?request=hostName&protocol=http&host=172.25.0.75&port=8080&tries=1'] Namespace:pod-network-test-9207 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:03:29.186: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:03:30.091: INFO: Waiting for endpoints: map[]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:03:30.091: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-9207" for this suite.
+Jun  4 17:03:54.124: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:03:54.459: INFO: namespace pod-network-test-9207 deletion completed in 24.359824847s
+
+• [SLOW TEST:46.865 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for intra-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
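+The `/dial` URLs in the log are served by the test's webserver container, which fans a request out to a target pod and reports the hostnames it reached. Re-running one probe by hand looks like this (the pod IPs are placeholders; real values appear in the ExecWithOptions lines above):
+
+```
+$ kubectl exec host-test-container-pod -- \
+    curl -g -q -s 'http://<container-pod-ip>:8080/dial?request=hostName&protocol=http&host=<target-pod-ip>&port=8080&tries=1'
+```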
+[sig-storage] ConfigMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:03:54.460: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-bbee9b40-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:03:54.512: INFO: Waiting up to 5m0s for pod "pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac" in namespace "configmap-9431" to be "success or failure"
+Jun  4 17:03:54.517: INFO: Pod "pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.742662ms
+Jun  4 17:03:56.522: INFO: Pod "pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010228879s
+STEP: Saw pod success
+Jun  4 17:03:56.522: INFO: Pod "pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:03:56.592: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac container configmap-volume-test: 
+STEP: delete the pod
+Jun  4 17:03:56.647: INFO: Waiting for pod pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:03:56.652: INFO: Pod pod-configmaps-bbefcc78-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:03:56.652: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-9431" for this suite.
+Jun  4 17:04:02.724: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:04:03.211: INFO: namespace configmap-9431 deletion completed in 6.553620374s
+
+• [SLOW TEST:8.751 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:04:03.212: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on tmpfs
+Jun  4 17:04:03.306: INFO: Waiting up to 5m0s for pod "pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-4418" to be "success or failure"
+Jun  4 17:04:03.313: INFO: Pod "pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.327906ms
+Jun  4 17:04:05.321: INFO: Pod "pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015052219s
+Jun  4 17:04:07.327: INFO: Pod "pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.021421403s
+STEP: Saw pod success
+Jun  4 17:04:07.327: INFO: Pod "pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:04:07.333: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:04:07.421: INFO: Waiting for pod pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:04:07.434: INFO: Pod pod-c12d5e6c-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:04:07.434: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-4418" for this suite.
+Jun  4 17:04:13.491: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:04:14.306: INFO: namespace emptydir-4418 deletion completed in 6.863193981s
+
+• [SLOW TEST:11.094 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
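+For the tmpfs variant above, the only changes from the default-medium case are `medium: Memory` on the volume and a non-root security context. A sketch (the UID and mode are illustrative):
+
+```
+apiVersion: v1
+kind: Pod
+metadata:
+  name: emptydir-tmpfs-demo
+spec:
+  restartPolicy: Never
+  securityContext:
+    runAsUser: 1001            # the "non-root" half of the test matrix
+  containers:
+  - name: test-container
+    image: busybox
+    command: ["sh", "-c", "echo hi > /test-volume/f && chmod 0666 /test-volume/f && stat -c '%a' /test-volume/f"]
+    volumeMounts:
+    - name: test-volume
+      mountPath: /test-volume
+  volumes:
+  - name: test-volume
+    emptyDir:
+      medium: Memory           # back the volume with tmpfs instead of node disk
+```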
+[sig-storage] Projected downwardAPI 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:04:14.306: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 17:04:14.365: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac" in namespace "projected-832" to be "success or failure"
+Jun  4 17:04:14.371: INFO: Pod "downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.54442ms
+Jun  4 17:04:16.421: INFO: Pod "downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.055496516s
+Jun  4 17:04:18.427: INFO: Pod "downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.061555063s
+STEP: Saw pod success
+Jun  4 17:04:18.427: INFO: Pod "downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:04:18.432: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 17:04:18.556: INFO: Waiting for pod downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:04:18.562: INFO: Pod downwardapi-volume-c7c52103-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:04:18.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-832" for this suite.
+Jun  4 17:04:24.599: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:04:25.131: INFO: namespace projected-832 deletion completed in 6.560440825s
+
+• [SLOW TEST:10.825 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:04:25.131: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Performing setup for networking test in namespace pod-network-test-2954
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+Jun  4 17:04:25.172: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+Jun  4 17:04:49.344: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://172.25.0.76:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-2954 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:04:49.344: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:04:50.192: INFO: Found all expected endpoints: [netserver-0]
+Jun  4 17:04:50.197: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://172.25.3.65:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-2954 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:04:50.198: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:04:50.989: INFO: Found all expected endpoints: [netserver-1]
+Jun  4 17:04:51.022: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://172.25.2.170:8080/hostName | grep -v '^\s*$'] Namespace:pod-network-test-2954 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+Jun  4 17:04:51.022: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+Jun  4 17:04:52.116: INFO: Found all expected endpoints: [netserver-2]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:04:52.116: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pod-network-test-2954" for this suite.
+Jun  4 17:05:16.147: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:05:16.535: INFO: namespace pod-network-test-2954 deletion completed in 24.411967348s
+
+• [SLOW TEST:51.404 seconds]
+[sig-network] Networking
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:05:16.535: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  4 17:05:16.602: INFO: Waiting up to 5m0s for pod "downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-213" to be "success or failure"
+Jun  4 17:05:16.608: INFO: Pod "downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.094667ms
+Jun  4 17:05:18.614: INFO: Pod "downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012559693s
+Jun  4 17:05:20.625: INFO: Pod "downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.023292689s
+STEP: Saw pod success
+Jun  4 17:05:20.625: INFO: Pod "downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:05:20.644: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac container dapi-container: 
+STEP: delete the pod
+Jun  4 17:05:20.709: INFO: Waiting for pod downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:05:20.719: INFO: Pod downward-api-ecdbc305-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:05:20.719: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-213" for this suite.
+Jun  4 17:05:26.794: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:05:27.344: INFO: namespace downward-api-213 deletion completed in 6.59435961s
+
+• [SLOW TEST:10.809 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:05:27.345: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-f35584cb-86ea-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 17:05:27.466: INFO: Waiting up to 5m0s for pod "pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac" in namespace "secrets-6028" to be "success or failure"
+Jun  4 17:05:27.472: INFO: Pod "pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.184112ms
+Jun  4 17:05:29.489: INFO: Pod "pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023358861s
+Jun  4 17:05:31.497: INFO: Pod "pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.030809988s
+STEP: Saw pod success
+Jun  4 17:05:31.497: INFO: Pod "pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:05:31.592: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac container secret-env-test: 
+STEP: delete the pod
+Jun  4 17:05:31.708: INFO: Waiting for pod pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:05:31.713: INFO: Pod pod-secrets-f35729ad-86ea-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:05:31.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6028" for this suite.
+Jun  4 17:05:37.813: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:05:38.307: INFO: namespace secrets-6028 deletion completed in 6.584248455s
+
+• [SLOW TEST:10.962 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:05:38.310: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0604 17:05:44.644333      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  4 17:05:44.644: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:05:44.644: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-1092" for this suite.
+Jun  4 17:05:50.710: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:05:51.151: INFO: namespace gc-1092 deletion completed in 6.457908142s
+
+• [SLOW TEST:12.841 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:05:51.151: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating secret secrets-5551/secret-test-017d5426-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 17:05:51.213: INFO: Waiting up to 5m0s for pod "pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac" in namespace "secrets-5551" to be "success or failure"
+Jun  4 17:05:51.217: INFO: Pod "pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.179093ms
+Jun  4 17:05:53.293: INFO: Pod "pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.080268846s
+Jun  4 17:05:55.298: INFO: Pod "pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.085809088s
+STEP: Saw pod success
+Jun  4 17:05:55.299: INFO: Pod "pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:05:55.304: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac container env-test: 
+STEP: delete the pod
+Jun  4 17:05:55.408: INFO: Waiting for pod pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:05:55.413: INFO: Pod pod-configmaps-017e6c89-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:05:55.413: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-5551" for this suite.
+Jun  4 17:06:01.435: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:06:02.143: INFO: namespace secrets-5551 deletion completed in 6.725460087s
+
+• [SLOW TEST:10.993 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32
+  should be consumable via the environment [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:06:02.144: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:265
+[It] should create and stop a replication controller  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a replication controller
+Jun  4 17:06:02.296: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 create -f - --namespace=kubectl-8926'
+Jun  4 17:06:03.211: INFO: stderr: ""
+Jun  4 17:06:03.211: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+Jun  4 17:06:03.211: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-8926'
+Jun  4 17:06:03.422: INFO: stderr: ""
+Jun  4 17:06:03.422: INFO: stdout: "update-demo-nautilus-d58dk update-demo-nautilus-ff8bq "
+Jun  4 17:06:03.423: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-d58dk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-8926'
+Jun  4 17:06:03.672: INFO: stderr: ""
+Jun  4 17:06:03.672: INFO: stdout: ""
+Jun  4 17:06:03.672: INFO: update-demo-nautilus-d58dk is created but not running
+Jun  4 17:06:08.673: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=kubectl-8926'
+Jun  4 17:06:08.777: INFO: stderr: ""
+Jun  4 17:06:08.777: INFO: stdout: "update-demo-nautilus-d58dk update-demo-nautilus-ff8bq "
+Jun  4 17:06:08.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-d58dk -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-8926'
+Jun  4 17:06:09.092: INFO: stderr: ""
+Jun  4 17:06:09.092: INFO: stdout: "true"
+Jun  4 17:06:09.092: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-d58dk -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-8926'
+Jun  4 17:06:09.170: INFO: stderr: ""
+Jun  4 17:06:09.170: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 17:06:09.170: INFO: validating pod update-demo-nautilus-d58dk
+Jun  4 17:06:09.438: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 17:06:09.438: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 17:06:09.438: INFO: update-demo-nautilus-d58dk is verified up and running
+Jun  4 17:06:09.438: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-ff8bq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=kubectl-8926'
+Jun  4 17:06:09.606: INFO: stderr: ""
+Jun  4 17:06:09.606: INFO: stdout: "true"
+Jun  4 17:06:09.606: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods update-demo-nautilus-ff8bq -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=kubectl-8926'
+Jun  4 17:06:09.795: INFO: stderr: ""
+Jun  4 17:06:09.795: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+Jun  4 17:06:09.795: INFO: validating pod update-demo-nautilus-ff8bq
+Jun  4 17:06:10.091: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+Jun  4 17:06:10.091: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+Jun  4 17:06:10.091: INFO: update-demo-nautilus-ff8bq is verified up and running
+STEP: using delete to clean up resources
+Jun  4 17:06:10.091: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete --grace-period=0 --force -f - --namespace=kubectl-8926'
+Jun  4 17:06:10.262: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+Jun  4 17:06:10.262: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n"
+Jun  4 17:06:10.262: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-8926'
+Jun  4 17:06:10.349: INFO: stderr: "No resources found.\n"
+Jun  4 17:06:10.350: INFO: stdout: ""
+Jun  4 17:06:10.350: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-8926 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 17:06:10.441: INFO: stderr: ""
+Jun  4 17:06:10.441: INFO: stdout: "update-demo-nautilus-d58dk\nupdate-demo-nautilus-ff8bq\n"
+Jun  4 17:06:10.942: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get rc,svc -l name=update-demo --no-headers --namespace=kubectl-8926'
+Jun  4 17:06:11.174: INFO: stderr: "No resources found.\n"
+Jun  4 17:06:11.174: INFO: stdout: ""
+Jun  4 17:06:11.174: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pods -l name=update-demo --namespace=kubectl-8926 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+Jun  4 17:06:11.260: INFO: stderr: ""
+Jun  4 17:06:11.260: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:06:11.261: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-8926" for this suite.
+Jun  4 17:06:33.395: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:06:33.583: INFO: namespace kubectl-8926 deletion completed in 22.290385838s
+
+• [SLOW TEST:31.439 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Update Demo
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create and stop a replication controller  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not conflict [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:06:33.583: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not conflict [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Cleaning up the secret
+STEP: Cleaning up the configmap
+STEP: Cleaning up the pod
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:06:37.855: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-wrapper-4124" for this suite.
+Jun  4 17:06:43.896: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:06:44.242: INFO: namespace emptydir-wrapper-4124 deletion completed in 6.379270418s
+
+• [SLOW TEST:10.659 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not conflict [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:06:44.242: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap configmap-8924/configmap-test-21331109-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:06:44.409: INFO: Waiting up to 5m0s for pod "pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac" in namespace "configmap-8924" to be "success or failure"
+Jun  4 17:06:44.416: INFO: Pod "pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 7.026137ms
+Jun  4 17:06:46.425: INFO: Pod "pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015796933s
+Jun  4 17:06:48.432: INFO: Pod "pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.022522749s
+STEP: Saw pod success
+Jun  4 17:06:48.432: INFO: Pod "pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:06:48.490: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac container env-test: 
+STEP: delete the pod
+Jun  4 17:06:48.553: INFO: Waiting for pod pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:06:48.557: INFO: Pod pod-configmaps-21342304-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:06:48.557: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-8924" for this suite.
+Jun  4 17:06:54.595: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:06:54.915: INFO: namespace configmap-8924 deletion completed in 6.350855063s
+
+• [SLOW TEST:10.673 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:32
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Events 
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:06:54.917: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename events
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: retrieving the pod
+Jun  4 17:06:57.197: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-278ef53d-86eb-11e9-a2b6-96b18e3e6fac,GenerateName:,Namespace:events-982,SelfLink:/api/v1/namespaces/events-982/pods/send-events-278ef53d-86eb-11e9-a2b6-96b18e3e6fac,UID:2791d9c4-86eb-11e9-83c6-06284416dbe9,ResourceVersion:29717,Generation:0,CreationTimestamp:2019-06-04 17:06:55 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 62768651,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-dw6s9 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-dw6s9,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] []  [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-dw6s9 true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc003043670} {node.kubernetes.io/unreachable Exists  NoExecute 0xc003043690}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:06:55 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:06:57 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:06:57 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:06:55 +0000 UTC  }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.180,StartTime:2019-06-04 17:06:55 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-06-04 17:06:56 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://gcr.io/kubernetes-e2e-test-images/serve-hostname@sha256:bab70473a6d8ef65a22625dc9a1b0f0452e811530fdbe77e4408523460177ff1 docker://54f51ee1b2d05d3022327cf15897d2eff6e94bbd7d1f8da56c6c8969d1aa7401}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+
+STEP: checking for scheduler event about the pod
+Jun  4 17:06:59.209: INFO: Saw scheduler event for our pod.
+STEP: checking for kubelet event about the pod
+Jun  4 17:07:01.233: INFO: Saw kubelet event for our pod.
+STEP: deleting the pod
+[AfterEach] [k8s.io] [sig-node] Events
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:07:01.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "events-982" for this suite.
+Jun  4 17:07:43.305: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:07:43.904: INFO: namespace events-982 deletion completed in 42.653207041s
+
+• [SLOW TEST:48.987 seconds]
+[k8s.io] [sig-node] Events
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:07:43.904: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-44d19fd2-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 17:07:44.169: INFO: Waiting up to 5m0s for pod "pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac" in namespace "secrets-3023" to be "success or failure"
+Jun  4 17:07:44.173: INFO: Pod "pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.555957ms
+Jun  4 17:07:46.178: INFO: Pod "pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009862063s
+Jun  4 17:07:48.183: INFO: Pod "pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.01488846s
+STEP: Saw pod success
+Jun  4 17:07:48.183: INFO: Pod "pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:07:48.188: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac container secret-volume-test: 
+STEP: delete the pod
+Jun  4 17:07:48.511: INFO: Waiting for pod pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:07:48.518: INFO: Pod pod-secrets-44d29040-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:07:48.518: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-3023" for this suite.
+Jun  4 17:07:54.701: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:07:55.170: INFO: namespace secrets-3023 deletion completed in 6.575601112s
+
+• [SLOW TEST:11.266 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:07:55.171: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-4b75c0a6-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:07:55.312: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac" in namespace "projected-5563" to be "success or failure"
+Jun  4 17:07:55.317: INFO: Pod "pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.857169ms
+Jun  4 17:07:57.323: INFO: Pod "pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011495955s
+Jun  4 17:07:59.331: INFO: Pod "pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018920336s
+STEP: Saw pod success
+Jun  4 17:07:59.331: INFO: Pod "pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:07:59.336: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  4 17:07:59.377: INFO: Waiting for pod pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:07:59.381: INFO: Pod pod-projected-configmaps-4b7704d4-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:07:59.381: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5563" for this suite.
+Jun  4 17:08:05.407: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:08:06.191: INFO: namespace projected-5563 deletion completed in 6.803392974s
+
+• [SLOW TEST:11.020 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:08:06.191: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:86
+[It] should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:08:06.294: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "services-5989" for this suite.
+Jun  4 17:08:12.422: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:08:12.632: INFO: namespace services-5989 deletion completed in 6.331270042s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:91
+
+• [SLOW TEST:6.441 seconds]
+[sig-network] Services
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide secure master service  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:08:12.632: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+Jun  4 17:08:12.768: INFO: Waiting up to 5m0s for pod "pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-2779" to be "success or failure"
+Jun  4 17:08:12.775: INFO: Pod "pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.705114ms
+Jun  4 17:08:14.781: INFO: Pod "pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01258877s
+Jun  4 17:08:16.789: INFO: Pod "pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.020697789s
+STEP: Saw pod success
+Jun  4 17:08:16.789: INFO: Pod "pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:08:16.794: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:08:17.009: INFO: Waiting for pod pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:08:17.014: INFO: Pod pod-55de7dc8-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:08:17.014: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-2779" for this suite.
+Jun  4 17:08:23.102: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:08:24.145: INFO: namespace emptydir-2779 deletion completed in 7.120036686s
+
+• [SLOW TEST:11.513 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:08:24.145: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward api env vars
+Jun  4 17:08:24.404: INFO: Waiting up to 5m0s for pod "downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-6304" to be "success or failure"
+Jun  4 17:08:24.413: INFO: Pod "downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.542902ms
+Jun  4 17:08:26.418: INFO: Pod "downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01381334s
+Jun  4 17:08:28.425: INFO: Pod "downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.0209141s
+STEP: Saw pod success
+Jun  4 17:08:28.425: INFO: Pod "downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:08:28.430: INFO: Trying to get logs from node ip-172-31-9-162.eu-central-1.compute.internal pod downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac container dapi-container: 
+STEP: delete the pod
+Jun  4 17:08:28.610: INFO: Waiting for pod downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:08:28.617: INFO: Pod downward-api-5ccdffa2-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:08:28.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-6304" for this suite.
+Jun  4 17:08:34.700: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:08:35.312: INFO: namespace downward-api-6304 deletion completed in 6.689204399s
+
+• [SLOW TEST:11.167 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected combined 
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected combined
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:08:35.312: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-projected-all-test-volume-636ba3ca-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating secret with name secret-projected-all-test-volume-636ba3ae-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test Check all projections for projected volume plugin
+Jun  4 17:08:35.716: INFO: Waiting up to 5m0s for pod "projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac" in namespace "projected-9005" to be "success or failure"
+Jun  4 17:08:35.729: INFO: Pod "projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 12.314659ms
+Jun  4 17:08:37.734: INFO: Pod "projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01810788s
+STEP: Saw pod success
+Jun  4 17:08:37.734: INFO: Pod "projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:08:37.739: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac container projected-all-volume-test: 
+STEP: delete the pod
+Jun  4 17:08:37.808: INFO: Waiting for pod projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:08:37.813: INFO: Pod projected-volume-636ba379-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected combined
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:08:37.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-9005" for this suite.
+Jun  4 17:08:43.837: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:08:44.530: INFO: namespace projected-9005 deletion completed in 6.711535214s
+
+• [SLOW TEST:9.219 seconds]
+[sig-storage] Projected combined
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:08:44.531: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-upd-68e724ec-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating the pod
+STEP: Waiting for pod with text data
+STEP: Waiting for pod with binary data
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:08:49.132: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-5174" for this suite.
+Jun  4 17:09:11.240: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:09:11.603: INFO: namespace configmap-5174 deletion completed in 22.381489013s
+
+• [SLOW TEST:27.072 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:09:11.603: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs
+STEP: Gathering metrics
+W0604 17:09:42.596555      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  4 17:09:42.596: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:09:42.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-5220" for this suite.
+Jun  4 17:09:48.619: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:09:48.943: INFO: namespace gc-5220 deletion completed in 6.341881596s
+
+• [SLOW TEST:37.340 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:09:48.943: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-8f3b80f8-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:09:49.102: INFO: Waiting up to 5m0s for pod "pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac" in namespace "configmap-879" to be "success or failure"
+Jun  4 17:09:49.108: INFO: Pod "pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 5.57755ms
+Jun  4 17:09:51.122: INFO: Pod "pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.019387134s
+Jun  4 17:09:53.129: INFO: Pod "pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.026327714s
+STEP: Saw pod success
+Jun  4 17:09:53.129: INFO: Pod "pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:09:53.145: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac container configmap-volume-test: 
+STEP: delete the pod
+Jun  4 17:09:53.186: INFO: Waiting for pod pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:09:53.190: INFO: Pod pod-configmaps-8f3ccc58-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:09:53.190: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-879" for this suite.
+Jun  4 17:09:59.253: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:09:59.727: INFO: namespace configmap-879 deletion completed in 6.491952522s
+
+• [SLOW TEST:10.784 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:09:59.727: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating a watch on configmaps with label A
+STEP: creating a watch on configmaps with label B
+STEP: creating a watch on configmaps with label A or B
+STEP: creating a configmap with label A and ensuring the correct watchers observe the notification
+Jun  4 17:09:59.916: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30446,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  4 17:09:59.916: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30446,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A and ensuring the correct watchers observe the notification
+Jun  4 17:10:09.931: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30471,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+Jun  4 17:10:09.931: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30471,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying configmap A again and ensuring the correct watchers observe the notification
+Jun  4 17:10:20.025: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30495,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  4 17:10:20.025: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30495,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: deleting configmap A and ensuring the correct watchers observe the notification
+Jun  4 17:10:30.040: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30521,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+Jun  4 17:10:30.040: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-a,UID:95be7e98-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30521,Generation:0,CreationTimestamp:2019-06-04 17:09:59 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+STEP: creating a configmap with label B and ensuring the correct watchers observe the notification
+Jun  4 17:10:40.055: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-b,UID:adab149d-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30546,Generation:0,CreationTimestamp:2019-06-04 17:10:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  4 17:10:40.055: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-b,UID:adab149d-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30546,Generation:0,CreationTimestamp:2019-06-04 17:10:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+STEP: deleting configmap B and ensuring the correct watchers observe the notification
+Jun  4 17:10:50.067: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-b,UID:adab149d-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30571,Generation:0,CreationTimestamp:2019-06-04 17:10:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+Jun  4 17:10:50.067: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:watch-7555,SelfLink:/api/v1/namespaces/watch-7555/configmaps/e2e-watch-test-configmap-b,UID:adab149d-86eb-11e9-83c6-06284416dbe9,ResourceVersion:30571,Generation:0,CreationTimestamp:2019-06-04 17:10:40 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Data:map[string]string{},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:11:00.067: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "watch-7555" for this suite.
+Jun  4 17:11:06.205: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:11:06.529: INFO: namespace watch-7555 deletion completed in 6.432376088s
+
+• [SLOW TEST:66.801 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should observe add, update, and delete watch notifications on configmaps [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run deployment 
+  should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:11:06.529: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1455
+[It] should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  4 17:11:06.652: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=kubectl-9811'
+Jun  4 17:11:07.082: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+Jun  4 17:11:07.082: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n"
+STEP: verifying the deployment e2e-test-nginx-deployment was created
+STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created
+[AfterEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1460
+Jun  4 17:11:11.294: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete deployment e2e-test-nginx-deployment --namespace=kubectl-9811'
+Jun  4 17:11:11.394: INFO: stderr: ""
+Jun  4 17:11:11.394: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:11:11.394: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-9811" for this suite.
+Jun  4 17:11:17.500: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:11:18.218: INFO: namespace kubectl-9811 deletion completed in 6.817760352s
+
+• [SLOW TEST:11.689 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create a deployment from an image  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:11:18.219: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 17:11:18.279: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-612" to be "success or failure"
+Jun  4 17:11:18.288: INFO: Pod "downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.413124ms
+Jun  4 17:11:20.295: INFO: Pod "downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015303749s
+Jun  4 17:11:22.394: INFO: Pod "downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.114860414s
+STEP: Saw pod success
+Jun  4 17:11:22.394: INFO: Pod "downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:11:22.401: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 17:11:22.451: INFO: Waiting for pod downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:11:22.457: INFO: Pod downwardapi-volume-c470fb48-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:11:22.457: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-612" for this suite.
+Jun  4 17:11:28.511: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:11:29.410: INFO: namespace downward-api-612 deletion completed in 6.946934093s
+
+• [SLOW TEST:11.191 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl replace 
+  should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:11:29.410: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[BeforeEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1619
+[It] should update a single-container pod's image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: running the image docker.io/library/nginx:1.14-alpine
+Jun  4 17:11:29.456: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=kubectl-6844'
+Jun  4 17:11:29.551: INFO: stderr: ""
+Jun  4 17:11:29.551: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod is running
+STEP: verifying the pod e2e-test-nginx-pod was created
+Jun  4 17:11:34.602: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 get pod e2e-test-nginx-pod --namespace=kubectl-6844 -o json'
+Jun  4 17:11:34.699: INFO: stderr: ""
+Jun  4 17:11:34.699: INFO: stdout: "{\n    \"apiVersion\": \"v1\",\n    \"kind\": \"Pod\",\n    \"metadata\": {\n        \"creationTimestamp\": \"2019-06-04T17:11:29Z\",\n        \"labels\": {\n            \"run\": \"e2e-test-nginx-pod\"\n        },\n        \"name\": \"e2e-test-nginx-pod\",\n        \"namespace\": \"kubectl-6844\",\n        \"resourceVersion\": \"30757\",\n        \"selfLink\": \"/api/v1/namespaces/kubectl-6844/pods/e2e-test-nginx-pod\",\n        \"uid\": \"cb2a7378-86eb-11e9-83c6-06284416dbe9\"\n    },\n    \"spec\": {\n        \"containers\": [\n            {\n                \"image\": \"docker.io/library/nginx:1.14-alpine\",\n                \"imagePullPolicy\": \"IfNotPresent\",\n                \"name\": \"e2e-test-nginx-pod\",\n                \"resources\": {},\n                \"terminationMessagePath\": \"/dev/termination-log\",\n                \"terminationMessagePolicy\": \"File\",\n                \"volumeMounts\": [\n                    {\n                        \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n                        \"name\": \"default-token-9lrbh\",\n                        \"readOnly\": true\n                    }\n                ]\n            }\n        ],\n        \"dnsPolicy\": \"ClusterFirst\",\n        \"enableServiceLinks\": true,\n        \"nodeName\": \"ip-172-31-9-156.eu-central-1.compute.internal\",\n        \"priority\": 0,\n        \"restartPolicy\": \"Always\",\n        \"schedulerName\": \"default-scheduler\",\n        \"securityContext\": {},\n        \"serviceAccount\": \"default\",\n        \"serviceAccountName\": \"default\",\n        \"terminationGracePeriodSeconds\": 30,\n        \"tolerations\": [\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/not-ready\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            },\n            {\n                \"effect\": \"NoExecute\",\n                \"key\": \"node.kubernetes.io/unreachable\",\n                \"operator\": \"Exists\",\n                \"tolerationSeconds\": 300\n            }\n        ],\n        \"volumes\": [\n            {\n                \"name\": \"default-token-9lrbh\",\n                \"secret\": {\n                    \"defaultMode\": 420,\n                    \"secretName\": \"default-token-9lrbh\"\n                }\n            }\n        ]\n    },\n    \"status\": {\n        \"conditions\": [\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-04T17:11:29Z\",\n                \"status\": \"True\",\n                \"type\": \"Initialized\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-04T17:11:31Z\",\n                \"status\": \"True\",\n                \"type\": \"Ready\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-04T17:11:31Z\",\n                \"status\": \"True\",\n                \"type\": \"ContainersReady\"\n            },\n            {\n                \"lastProbeTime\": null,\n                \"lastTransitionTime\": \"2019-06-04T17:11:29Z\",\n                \"status\": \"True\",\n                \"type\": \"PodScheduled\"\n            }\n        ],\n        \"containerStatuses\": [\n            {\n                \"containerID\": \"docker://dc44abb010d28338a0aa1c8aa9be6317291c3bddbb366e0e6e0287040a97abc2\",\n                \"image\": \"nginx:1.14-alpine\",\n                \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n                \"lastState\": {},\n                \"name\": \"e2e-test-nginx-pod\",\n                \"ready\": true,\n                \"restartCount\": 0,\n                \"state\": {\n                    \"running\": {\n                        \"startedAt\": \"2019-06-04T17:11:31Z\"\n                    }\n                }\n            }\n        ],\n        \"hostIP\": \"172.31.9.156\",\n        \"phase\": \"Running\",\n        \"podIP\": \"172.25.2.190\",\n        \"qosClass\": \"BestEffort\",\n        \"startTime\": \"2019-06-04T17:11:29Z\"\n    }\n}\n"
+STEP: replace the image in the pod
+Jun  4 17:11:34.699: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 replace -f - --namespace=kubectl-6844'
+Jun  4 17:11:34.997: INFO: stderr: ""
+Jun  4 17:11:34.997: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n"
+STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29
+[AfterEach] [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1624
+Jun  4 17:11:35.002: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 delete pods e2e-test-nginx-pod --namespace=kubectl-6844'
+Jun  4 17:11:41.732: INFO: stderr: ""
+Jun  4 17:11:41.732: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:11:41.732: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6844" for this suite.
+Jun  4 17:11:47.819: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:11:48.521: INFO: namespace kubectl-6844 deletion completed in 6.77851247s
+
+• [SLOW TEST:19.111 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl replace
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should update a single-container pod's image  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job 
+  should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:11:48.521: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: executing a command with run --rm and attach with stdin
+Jun  4 17:11:48.632: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 --namespace=kubectl-6064 run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''
+Jun  4 17:11:51.848: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n"
+Jun  4 17:11:51.850: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n"
+STEP: verifying the job e2e-test-rm-busybox-job was deleted
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:11:54.002: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-6064" for this suite.
+Jun  4 17:12:00.119: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:00.916: INFO: namespace kubectl-6064 deletion completed in 6.907138889s
+
+• [SLOW TEST:12.395 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Kubectl run --rm job
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should create a job from an image, then delete the job  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:00.916: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:213
+[It] should support proxy with --port 0  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: starting the proxy server
+Jun  4 17:12:01.100: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-441229521 proxy -p 0 --disable-filter'
+STEP: curling proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:12:01.400: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubectl-5898" for this suite.
+Jun  4 17:12:07.521: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:07.999: INFO: namespace kubectl-5898 deletion completed in 6.593447332s
+
+• [SLOW TEST:7.083 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:23
+  [k8s.io] Proxy server
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    should support proxy with --port 0  [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:07.999: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: setting up watch
+STEP: submitting the pod to kubernetes
+Jun  4 17:12:08.107: INFO: observed the pod list
+STEP: verifying the pod is in kubernetes
+STEP: verifying pod creation was observed
+STEP: deleting the pod gracefully
+STEP: verifying the kubelet observed the termination notice
+Jun  4 17:12:17.496: INFO: no pod exists with the name we were looking for, assuming the termination request was observed and completed
+STEP: verifying pod deletion was observed
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:12:17.508: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-1868" for this suite.
+Jun  4 17:12:23.700: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:24.323: INFO: namespace pods-1868 deletion completed in 6.805736906s
+
+• [SLOW TEST:16.324 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be submitted and removed [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:24.323: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name projected-configmap-test-volume-map-ebe12e58-86eb-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:12:24.455: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac" in namespace "projected-7097" to be "success or failure"
+Jun  4 17:12:24.496: INFO: Pod "pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 41.021614ms
+Jun  4 17:12:26.506: INFO: Pod "pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.050923037s
+STEP: Saw pod success
+Jun  4 17:12:26.506: INFO: Pod "pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:12:26.511: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac container projected-configmap-volume-test: 
+STEP: delete the pod
+Jun  4 17:12:26.591: INFO: Waiting for pod pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:12:26.596: INFO: Pod pod-projected-configmaps-ebe268af-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:12:26.596: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-7097" for this suite.
+Jun  4 17:12:32.617: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:33.128: INFO: namespace projected-7097 deletion completed in 6.52555124s
+
+• [SLOW TEST:8.804 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:33.128: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+Jun  4 17:12:33.196: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+Jun  4 17:12:33.207: INFO: Waiting for terminating namespaces to be deleted...
+Jun  4 17:12:33.211: INFO: 
+Logging pods the kubelet thinks is on node ip-172-31-11-48.eu-central-1.compute.internal before test
+Jun  4 17:12:33.322: INFO: kube-proxy-8f464 from kube-system started at 2019-06-04 14:59:19 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: node-local-dns-bqd4m from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container node-cache ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: canal-dqcxs from kube-system started at 2019-06-04 14:59:19 +0000 UTC (3 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container calico-node ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: 	Container install-cni ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: coredns-568fd445fd-l7bhx from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container coredns ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-tmnxg from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+Jun  4 17:12:33.322: INFO: 	Container systemd-logs ready: true, restart count 1
+Jun  4 17:12:33.322: INFO: node-exporter-fm98z from kube-system started at 2019-06-04 14:59:19 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container kube-rbac-proxy ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: 	Container node-exporter ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: openvpn-client-5bbcf59684-r2rls from kube-system started at 2019-06-04 14:59:59 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container dnat-controller ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: 	Container openvpn-client ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: kubernetes-dashboard-57dcd9448b-pcpsp from kube-system started at 2019-06-04 14:59:59 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container kubernetes-dashboard ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: coredns-568fd445fd-q5bsd from kube-system started at 2019-06-04 15:00:00 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.322: INFO: 	Container coredns ready: true, restart count 0
+Jun  4 17:12:33.322: INFO: 
+Logging pods the kubelet thinks is on node ip-172-31-9-156.eu-central-1.compute.internal before test
+Jun  4 17:12:33.404: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-bmnlh from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+Jun  4 17:12:33.404: INFO: 	Container systemd-logs ready: true, restart count 1
+Jun  4 17:12:33.404: INFO: node-local-dns-t84xd from kube-system started at 2019-06-04 15:00:24 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container node-cache ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: sonobuoy from heptio-sonobuoy started at 2019-06-04 15:54:23 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container kube-sonobuoy ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: kube-proxy-zvrkb from kube-system started at 2019-06-04 14:59:24 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: canal-5xshg from kube-system started at 2019-06-04 14:59:24 +0000 UTC (3 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container calico-node ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: 	Container install-cni ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  4 17:12:33.404: INFO: node-exporter-2bq9l from kube-system started at 2019-06-04 14:59:24 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.404: INFO: 	Container kube-rbac-proxy ready: true, restart count 0
+Jun  4 17:12:33.405: INFO: 	Container node-exporter ready: true, restart count 0
+Jun  4 17:12:33.405: INFO: 
+Logging pods the kubelet thinks is on node ip-172-31-9-162.eu-central-1.compute.internal before test
+Jun  4 17:12:33.539: INFO: node-exporter-gkmxz from kube-system started at 2019-06-04 14:59:30 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container kube-rbac-proxy ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: 	Container node-exporter ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: node-local-dns-wslm4 from kube-system started at 2019-06-04 15:00:11 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container node-cache ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-psdr6 from heptio-sonobuoy started at 2019-06-04 15:54:25 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container sonobuoy-worker ready: true, restart count 1
+Jun  4 17:12:33.539: INFO: 	Container systemd-logs ready: true, restart count 1
+Jun  4 17:12:33.539: INFO: kube-proxy-htwg4 from kube-system started at 2019-06-04 14:59:30 +0000 UTC (1 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container kube-proxy ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: canal-6zg8m from kube-system started at 2019-06-04 14:59:30 +0000 UTC (3 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container calico-node ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: 	Container install-cni ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: 	Container kube-flannel ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: sonobuoy-e2e-job-eb1ef483a117445f from heptio-sonobuoy started at 2019-06-04 15:54:24 +0000 UTC (2 container statuses recorded)
+Jun  4 17:12:33.539: INFO: 	Container e2e ready: true, restart count 0
+Jun  4 17:12:33.539: INFO: 	Container sonobuoy-worker ready: true, restart count 0
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: verifying the node has the label node ip-172-31-11-48.eu-central-1.compute.internal
+STEP: verifying the node has the label node ip-172-31-9-156.eu-central-1.compute.internal
+STEP: verifying the node has the label node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod sonobuoy requesting resource cpu=0m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod sonobuoy-e2e-job-eb1ef483a117445f requesting resource cpu=0m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-bmnlh requesting resource cpu=0m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-psdr6 requesting resource cpu=0m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod sonobuoy-systemd-logs-daemon-set-5255c68569c5443e-tmnxg requesting resource cpu=0m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod canal-5xshg requesting resource cpu=350m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod canal-6zg8m requesting resource cpu=350m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod canal-dqcxs requesting resource cpu=350m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod coredns-568fd445fd-l7bhx requesting resource cpu=100m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod coredns-568fd445fd-q5bsd requesting resource cpu=100m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod kube-proxy-8f464 requesting resource cpu=75m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod kube-proxy-htwg4 requesting resource cpu=75m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod kube-proxy-zvrkb requesting resource cpu=75m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod kubernetes-dashboard-57dcd9448b-pcpsp requesting resource cpu=75m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-exporter-2bq9l requesting resource cpu=20m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-exporter-fm98z requesting resource cpu=20m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-exporter-gkmxz requesting resource cpu=20m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-local-dns-bqd4m requesting resource cpu=25m on Node ip-172-31-11-48.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-local-dns-t84xd requesting resource cpu=25m on Node ip-172-31-9-156.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod node-local-dns-wslm4 requesting resource cpu=25m on Node ip-172-31-9-162.eu-central-1.compute.internal
+Jun  4 17:12:33.706: INFO: Pod openvpn-client-5bbcf59684-r2rls requesting resource cpu=30m on Node ip-172-31-11-48.eu-central-1.compute.internal
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac.15a50e1aacbde997], Reason = [Scheduled], Message = [Successfully assigned sched-pred-123/filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac to ip-172-31-11-48.eu-central-1.compute.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac.15a50e1afef4b26e], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b034609ff], Reason = [Created], Message = [Created container filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b151a1fcc], Reason = [Started], Message = [Started container filler-pod-f167f366-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac.15a50e1aace428c4], Reason = [Scheduled], Message = [Successfully assigned sched-pred-123/filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac to ip-172-31-9-156.eu-central-1.compute.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac.15a50e1afbdcd51f], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac.15a50e1afff8e142], Reason = [Created], Message = [Created container filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b0ecc2587], Reason = [Started], Message = [Started container filler-pod-f1698759-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac.15a50e1ab1691c0d], Reason = [Scheduled], Message = [Successfully assigned sched-pred-123/filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac to ip-172-31-9-162.eu-central-1.compute.internal]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b0870e9ce], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b0d394655], Reason = [Created], Message = [Created container filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac.15a50e1b1cb6289e], Reason = [Started], Message = [Started container filler-pod-f16bb10b-86eb-11e9-a2b6-96b18e3e6fac]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.15a50e1ba8c1b7db], Reason = [FailedScheduling], Message = [0/3 nodes are available: 3 Insufficient cpu.]
+STEP: removing the label node off the node ip-172-31-9-156.eu-central-1.compute.internal
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node ip-172-31-9-162.eu-central-1.compute.internal
+STEP: verifying the node doesn't have the label node
+STEP: removing the label node off the node ip-172-31-11-48.eu-central-1.compute.internal
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:12:39.128: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "sched-pred-123" for this suite.
+Jun  4 17:12:45.197: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:45.696: INFO: namespace sched-pred-123 deletion completed in 6.562655489s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:12.568 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:45.697: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 17:12:45.896: INFO: Waiting up to 5m0s for pod "downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-170" to be "success or failure"
+Jun  4 17:12:45.907: INFO: Pod "downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 11.04384ms
+Jun  4 17:12:47.913: INFO: Pod "downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017045404s
+STEP: Saw pod success
+Jun  4 17:12:47.913: INFO: Pod "downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:12:47.998: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 17:12:48.196: INFO: Waiting for pod downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:12:48.202: INFO: Pod downwardapi-volume-f89cb650-86eb-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:12:48.202: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-170" for this suite.
+Jun  4 17:12:54.238: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:12:55.029: INFO: namespace downward-api-170 deletion completed in 6.819802273s
+
+• [SLOW TEST:9.332 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Service endpoints latency 
+  should not be very high  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:12:55.029: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename svc-latency
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be very high  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating replication controller svc-latency-rc in namespace svc-latency-5530
+I0604 17:12:55.146583      15 runners.go:184] Created replication controller with name: svc-latency-rc, namespace: svc-latency-5530, replica count: 1
+I0604 17:12:56.196981      15 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0604 17:12:57.197170      15 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+I0604 17:12:58.197353      15 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady 
+Jun  4 17:12:58.315: INFO: Created: latency-svc-8d2ss
+Jun  4 17:12:58.319: INFO: Got endpoints: latency-svc-8d2ss [19.22927ms]
+Jun  4 17:12:58.412: INFO: Created: latency-svc-6bttk
+Jun  4 17:12:58.418: INFO: Got endpoints: latency-svc-6bttk [98.129157ms]
+Jun  4 17:12:58.423: INFO: Created: latency-svc-dbqw2
+Jun  4 17:12:58.429: INFO: Got endpoints: latency-svc-dbqw2 [109.156417ms]
+Jun  4 17:12:58.435: INFO: Created: latency-svc-6sdf4
+Jun  4 17:12:58.443: INFO: Got endpoints: latency-svc-6sdf4 [122.787116ms]
+Jun  4 17:12:58.445: INFO: Created: latency-svc-tv2ks
+Jun  4 17:12:58.463: INFO: Created: latency-svc-ggbkg
+Jun  4 17:12:58.464: INFO: Got endpoints: latency-svc-tv2ks [142.726753ms]
+Jun  4 17:12:58.497: INFO: Created: latency-svc-txfnm
+Jun  4 17:12:58.497: INFO: Created: latency-svc-qmmq9
+Jun  4 17:12:58.497: INFO: Got endpoints: latency-svc-ggbkg [176.543851ms]
+Jun  4 17:12:58.497: INFO: Got endpoints: latency-svc-qmmq9 [175.736654ms]
+Jun  4 17:12:58.497: INFO: Got endpoints: latency-svc-txfnm [176.026979ms]
+Jun  4 17:12:58.596: INFO: Created: latency-svc-l2jc6
+Jun  4 17:12:58.596: INFO: Created: latency-svc-wj8wg
+Jun  4 17:12:58.596: INFO: Created: latency-svc-88bjm
+Jun  4 17:12:58.597: INFO: Created: latency-svc-4t66g
+Jun  4 17:12:58.597: INFO: Created: latency-svc-6g45z
+Jun  4 17:12:58.597: INFO: Created: latency-svc-zxm7s
+Jun  4 17:12:58.597: INFO: Created: latency-svc-hxdd6
+Jun  4 17:12:58.597: INFO: Got endpoints: latency-svc-88bjm [179.295665ms]
+Jun  4 17:12:58.597: INFO: Created: latency-svc-jsdk9
+Jun  4 17:12:58.597: INFO: Got endpoints: latency-svc-jsdk9 [267.921685ms]
+Jun  4 17:12:58.597: INFO: Got endpoints: latency-svc-l2jc6 [276.849308ms]
+Jun  4 17:12:58.597: INFO: Got endpoints: latency-svc-zxm7s [276.020629ms]
+Jun  4 17:12:58.598: INFO: Got endpoints: latency-svc-hxdd6 [277.343757ms]
+Jun  4 17:12:58.598: INFO: Got endpoints: latency-svc-wj8wg [277.053083ms]
+Jun  4 17:12:58.598: INFO: Got endpoints: latency-svc-4t66g [277.04446ms]
+Jun  4 17:12:58.598: INFO: Got endpoints: latency-svc-6g45z [268.46574ms]
+Jun  4 17:12:58.598: INFO: Created: latency-svc-l27fj
+Jun  4 17:12:58.598: INFO: Got endpoints: latency-svc-l27fj [268.590237ms]
+Jun  4 17:12:58.607: INFO: Created: latency-svc-vnpmg
+Jun  4 17:12:58.612: INFO: Got endpoints: latency-svc-vnpmg [182.890719ms]
+Jun  4 17:12:58.618: INFO: Created: latency-svc-r9wcs
+Jun  4 17:12:58.623: INFO: Got endpoints: latency-svc-r9wcs [179.972727ms]
+Jun  4 17:12:58.631: INFO: Created: latency-svc-c8pbj
+Jun  4 17:12:58.696: INFO: Created: latency-svc-rvfgn
+Jun  4 17:12:58.696: INFO: Created: latency-svc-wqggz
+Jun  4 17:12:58.696: INFO: Created: latency-svc-hs9mz
+Jun  4 17:12:58.696: INFO: Created: latency-svc-hqdtj
+Jun  4 17:12:58.697: INFO: Got endpoints: latency-svc-hqdtj [199.636894ms]
+Jun  4 17:12:58.697: INFO: Got endpoints: latency-svc-rvfgn [99.703472ms]
+Jun  4 17:12:58.697: INFO: Got endpoints: latency-svc-wqggz [199.60847ms]
+Jun  4 17:12:58.697: INFO: Got endpoints: latency-svc-c8pbj [233.279723ms]
+Jun  4 17:12:58.697: INFO: Got endpoints: latency-svc-hs9mz [199.897283ms]
+Jun  4 17:12:58.698: INFO: Created: latency-svc-5k5bd
+Jun  4 17:12:58.797: INFO: Got endpoints: latency-svc-5k5bd [199.367629ms]
+Jun  4 17:12:58.798: INFO: Created: latency-svc-fchd5
+Jun  4 17:12:58.798: INFO: Created: latency-svc-dvz44
+Jun  4 17:12:58.798: INFO: Created: latency-svc-xr69r
+Jun  4 17:12:58.798: INFO: Got endpoints: latency-svc-xr69r [200.271426ms]
+Jun  4 17:12:58.798: INFO: Created: latency-svc-w7b9m
+Jun  4 17:12:58.798: INFO: Got endpoints: latency-svc-dvz44 [200.667521ms]
+Jun  4 17:12:58.799: INFO: Created: latency-svc-tmpdg
+Jun  4 17:12:58.799: INFO: Created: latency-svc-ncdfv
+Jun  4 17:12:58.799: INFO: Got endpoints: latency-svc-w7b9m [201.248884ms]
+Jun  4 17:12:58.799: INFO: Created: latency-svc-cmf9m
+Jun  4 17:12:58.799: INFO: Got endpoints: latency-svc-cmf9m [186.919102ms]
+Jun  4 17:12:58.799: INFO: Got endpoints: latency-svc-fchd5 [201.488269ms]
+Jun  4 17:12:58.799: INFO: Got endpoints: latency-svc-tmpdg [201.412986ms]
+Jun  4 17:12:58.799: INFO: Created: latency-svc-pmpjp
+Jun  4 17:12:58.800: INFO: Got endpoints: latency-svc-pmpjp [202.381616ms]
+Jun  4 17:12:58.803: INFO: Got endpoints: latency-svc-ncdfv [204.548451ms]
+Jun  4 17:12:58.803: INFO: Created: latency-svc-gg2mw
+Jun  4 17:12:58.809: INFO: Got endpoints: latency-svc-gg2mw [186.042876ms]
+Jun  4 17:12:58.812: INFO: Created: latency-svc-9gnqb
+Jun  4 17:12:58.815: INFO: Got endpoints: latency-svc-9gnqb [118.664207ms]
+Jun  4 17:12:58.818: INFO: Created: latency-svc-rlv84
+Jun  4 17:12:58.825: INFO: Got endpoints: latency-svc-rlv84 [127.387319ms]
+Jun  4 17:12:58.828: INFO: Created: latency-svc-v9jc2
+Jun  4 17:12:58.896: INFO: Got endpoints: latency-svc-v9jc2 [199.214084ms]
+Jun  4 17:12:58.896: INFO: Created: latency-svc-xl5tp
+Jun  4 17:12:58.896: INFO: Got endpoints: latency-svc-xl5tp [199.840065ms]
+Jun  4 17:12:58.896: INFO: Created: latency-svc-fz9jg
+Jun  4 17:12:58.897: INFO: Created: latency-svc-jmc45
+Jun  4 17:12:58.897: INFO: Created: latency-svc-58h7h
+Jun  4 17:12:58.897: INFO: Got endpoints: latency-svc-58h7h [199.972493ms]
+Jun  4 17:12:58.897: INFO: Created: latency-svc-5h6db
+Jun  4 17:12:58.897: INFO: Got endpoints: latency-svc-5h6db [99.690063ms]
+Jun  4 17:12:58.897: INFO: Created: latency-svc-gwl5t
+Jun  4 17:12:58.897: INFO: Created: latency-svc-l9bxv
+Jun  4 17:12:58.897: INFO: Created: latency-svc-lv7z2
+Jun  4 17:12:58.897: INFO: Created: latency-svc-xwglq
+Jun  4 17:12:58.901: INFO: Created: latency-svc-wjhpl
+Jun  4 17:12:58.908: INFO: Created: latency-svc-lsz58
+Jun  4 17:12:58.996: INFO: Created: latency-svc-hzg5z
+Jun  4 17:12:58.996: INFO: Created: latency-svc-n7dxt
+Jun  4 17:12:58.996: INFO: Created: latency-svc-rz4zc
+Jun  4 17:12:58.996: INFO: Created: latency-svc-sp2p2
+Jun  4 17:12:58.996: INFO: Created: latency-svc-5sl5c
+Jun  4 17:12:58.996: INFO: Created: latency-svc-jfsrq
+Jun  4 17:12:58.996: INFO: Created: latency-svc-jwxkw
+Jun  4 17:12:58.996: INFO: Got endpoints: latency-svc-jmc45 [197.572551ms]
+Jun  4 17:12:58.996: INFO: Got endpoints: latency-svc-gwl5t [197.674122ms]
+Jun  4 17:12:59.011: INFO: Created: latency-svc-z9lj2
+Jun  4 17:12:59.021: INFO: Got endpoints: latency-svc-lv7z2 [223.142422ms]
+Jun  4 17:12:59.035: INFO: Created: latency-svc-lwlhw
+Jun  4 17:12:59.053: INFO: Created: latency-svc-xr4mw
+Jun  4 17:12:59.069: INFO: Got endpoints: latency-svc-l9bxv [269.184281ms]
+Jun  4 17:12:59.096: INFO: Created: latency-svc-9kpwf
+Jun  4 17:12:59.123: INFO: Got endpoints: latency-svc-xwglq [323.948606ms]
+Jun  4 17:12:59.139: INFO: Created: latency-svc-4w8qm
+Jun  4 17:12:59.167: INFO: Got endpoints: latency-svc-fz9jg [368.433216ms]
+Jun  4 17:12:59.178: INFO: Created: latency-svc-4996w
+Jun  4 17:12:59.219: INFO: Got endpoints: latency-svc-wjhpl [420.697659ms]
+Jun  4 17:12:59.239: INFO: Created: latency-svc-h89nd
+Jun  4 17:12:59.277: INFO: Got endpoints: latency-svc-lsz58 [474.427253ms]
+Jun  4 17:12:59.309: INFO: Created: latency-svc-7q5ng
+Jun  4 17:12:59.396: INFO: Got endpoints: latency-svc-rz4zc [587.231314ms]
+Jun  4 17:12:59.396: INFO: Got endpoints: latency-svc-hzg5z [499.366999ms]
+Jun  4 17:12:59.412: INFO: Created: latency-svc-9f7jg
+Jun  4 17:12:59.421: INFO: Got endpoints: latency-svc-sp2p2 [605.209807ms]
+Jun  4 17:12:59.421: INFO: Created: latency-svc-jqt7g
+Jun  4 17:12:59.434: INFO: Created: latency-svc-2cldw
+Jun  4 17:12:59.470: INFO: Got endpoints: latency-svc-5sl5c [645.355783ms]
+Jun  4 17:12:59.509: INFO: Created: latency-svc-httbh
+Jun  4 17:12:59.518: INFO: Got endpoints: latency-svc-jfsrq [621.805015ms]
+Jun  4 17:12:59.596: INFO: Created: latency-svc-x7p45
+Jun  4 17:12:59.596: INFO: Got endpoints: latency-svc-jwxkw [699.669814ms]
+Jun  4 17:12:59.696: INFO: Created: latency-svc-dk7xk
+Jun  4 17:12:59.696: INFO: Got endpoints: latency-svc-n7dxt [799.435444ms]
+Jun  4 17:12:59.696: INFO: Got endpoints: latency-svc-z9lj2 [699.464102ms]
+Jun  4 17:12:59.708: INFO: Created: latency-svc-vtwwc
+Jun  4 17:12:59.721: INFO: Got endpoints: latency-svc-lwlhw [723.767887ms]
+Jun  4 17:12:59.721: INFO: Created: latency-svc-vwfqw
+Jun  4 17:12:59.733: INFO: Created: latency-svc-4zxsp
+Jun  4 17:12:59.768: INFO: Got endpoints: latency-svc-xr4mw [746.76468ms]
+Jun  4 17:12:59.781: INFO: Created: latency-svc-dd26q
+Jun  4 17:12:59.819: INFO: Got endpoints: latency-svc-9kpwf [750.201192ms]
+Jun  4 17:12:59.832: INFO: Created: latency-svc-84z7j
+Jun  4 17:12:59.867: INFO: Got endpoints: latency-svc-4w8qm [744.084897ms]
+Jun  4 17:12:59.879: INFO: Created: latency-svc-nrh8m
+Jun  4 17:12:59.919: INFO: Got endpoints: latency-svc-4996w [751.839173ms]
+Jun  4 17:12:59.931: INFO: Created: latency-svc-4x5vd
+Jun  4 17:12:59.968: INFO: Got endpoints: latency-svc-h89nd [748.545014ms]
+Jun  4 17:12:59.982: INFO: Created: latency-svc-q4pjw
+Jun  4 17:13:00.023: INFO: Got endpoints: latency-svc-7q5ng [745.885263ms]
+Jun  4 17:13:00.037: INFO: Created: latency-svc-8t4dt
+Jun  4 17:13:00.068: INFO: Got endpoints: latency-svc-9f7jg [671.94397ms]
+Jun  4 17:13:00.081: INFO: Created: latency-svc-4fvdf
+Jun  4 17:13:00.120: INFO: Got endpoints: latency-svc-jqt7g [723.170084ms]
+Jun  4 17:13:00.141: INFO: Created: latency-svc-bsjzd
+Jun  4 17:13:00.168: INFO: Got endpoints: latency-svc-2cldw [746.987304ms]
+Jun  4 17:13:00.181: INFO: Created: latency-svc-vhd8r
+Jun  4 17:13:00.221: INFO: Got endpoints: latency-svc-httbh [750.621839ms]
+Jun  4 17:13:00.396: INFO: Got endpoints: latency-svc-x7p45 [877.963287ms]
+Jun  4 17:13:00.396: INFO: Got endpoints: latency-svc-vtwwc [699.942228ms]
+Jun  4 17:13:00.396: INFO: Got endpoints: latency-svc-dk7xk [799.897147ms]
+Jun  4 17:13:00.496: INFO: Created: latency-svc-7t5kx
+Jun  4 17:13:00.497: INFO: Got endpoints: latency-svc-4zxsp [775.555918ms]
+Jun  4 17:13:00.497: INFO: Got endpoints: latency-svc-vwfqw [800.381174ms]
+Jun  4 17:13:00.599: INFO: Created: latency-svc-zvs8v
+Jun  4 17:13:00.599: INFO: Created: latency-svc-99r67
+Jun  4 17:13:00.599: INFO: Created: latency-svc-xlmvw
+Jun  4 17:13:00.599: INFO: Created: latency-svc-dncl7
+Jun  4 17:13:00.599: INFO: Created: latency-svc-crn2q
+Jun  4 17:13:00.599: INFO: Got endpoints: latency-svc-84z7j [780.083566ms]
+Jun  4 17:13:00.599: INFO: Got endpoints: latency-svc-dd26q [831.058335ms]
+Jun  4 17:13:00.701: INFO: Got endpoints: latency-svc-4x5vd [781.747678ms]
+Jun  4 17:13:00.701: INFO: Got endpoints: latency-svc-nrh8m [833.882538ms]
+Jun  4 17:13:00.709: INFO: Created: latency-svc-l4x69
+Jun  4 17:13:00.725: INFO: Created: latency-svc-z57jq
+Jun  4 17:13:00.725: INFO: Got endpoints: latency-svc-q4pjw [757.373279ms]
+Jun  4 17:13:00.733: INFO: Created: latency-svc-h8d5b
+Jun  4 17:13:00.798: INFO: Created: latency-svc-vbv68
+Jun  4 17:13:00.798: INFO: Created: latency-svc-qcjkf
+Jun  4 17:13:00.798: INFO: Got endpoints: latency-svc-8t4dt [775.266644ms]
+Jun  4 17:13:00.819: INFO: Created: latency-svc-jkb6c
+Jun  4 17:13:00.825: INFO: Got endpoints: latency-svc-4fvdf [756.744104ms]
+Jun  4 17:13:00.896: INFO: Got endpoints: latency-svc-bsjzd [776.533056ms]
+Jun  4 17:13:00.907: INFO: Created: latency-svc-h5sfr
+Jun  4 17:13:00.913: INFO: Created: latency-svc-ds65d
+Jun  4 17:13:00.996: INFO: Got endpoints: latency-svc-7t5kx [775.336172ms]
+Jun  4 17:13:00.996: INFO: Got endpoints: latency-svc-vhd8r [828.562353ms]
+Jun  4 17:13:01.008: INFO: Created: latency-svc-b49m6
+Jun  4 17:13:01.014: INFO: Created: latency-svc-25twc
+Jun  4 17:13:01.019: INFO: Got endpoints: latency-svc-crn2q [622.993851ms]
+Jun  4 17:13:01.047: INFO: Created: latency-svc-dblgd
+Jun  4 17:13:01.096: INFO: Got endpoints: latency-svc-xlmvw [699.701999ms]
+Jun  4 17:13:01.196: INFO: Got endpoints: latency-svc-dncl7 [699.535314ms]
+Jun  4 17:13:01.196: INFO: Got endpoints: latency-svc-99r67 [799.739063ms]
+Jun  4 17:13:01.216: INFO: Created: latency-svc-7qb28
+Jun  4 17:13:01.219: INFO: Got endpoints: latency-svc-zvs8v [722.729431ms]
+Jun  4 17:13:01.222: INFO: Created: latency-svc-5gvbp
+Jun  4 17:13:01.230: INFO: Created: latency-svc-jc6ck
+Jun  4 17:13:01.236: INFO: Created: latency-svc-7q5v6
+Jun  4 17:13:01.271: INFO: Got endpoints: latency-svc-l4x69 [671.292449ms]
+Jun  4 17:13:01.313: INFO: Created: latency-svc-gd6wq
+Jun  4 17:13:01.318: INFO: Got endpoints: latency-svc-z57jq [718.518454ms]
+Jun  4 17:13:01.396: INFO: Got endpoints: latency-svc-h8d5b [694.727855ms]
+Jun  4 17:13:01.396: INFO: Created: latency-svc-75pmp
+Jun  4 17:13:01.412: INFO: Created: latency-svc-tnvtm
+Jun  4 17:13:01.427: INFO: Got endpoints: latency-svc-vbv68 [725.340197ms]
+Jun  4 17:13:01.496: INFO: Got endpoints: latency-svc-qcjkf [771.161842ms]
+Jun  4 17:13:01.596: INFO: Created: latency-svc-nrf4k
+Jun  4 17:13:01.596: INFO: Created: latency-svc-v5d2v
+Jun  4 17:13:01.596: INFO: Got endpoints: latency-svc-h5sfr [771.125524ms]
+Jun  4 17:13:01.596: INFO: Got endpoints: latency-svc-jkb6c [797.884962ms]
+Jun  4 17:13:01.610: INFO: Created: latency-svc-82f95
+Jun  4 17:13:01.618: INFO: Created: latency-svc-prs8g
+Jun  4 17:13:01.619: INFO: Got endpoints: latency-svc-ds65d [722.950891ms]
+Jun  4 17:13:01.640: INFO: Created: latency-svc-nq2ln
+Jun  4 17:13:01.668: INFO: Got endpoints: latency-svc-b49m6 [671.713715ms]
+Jun  4 17:13:01.680: INFO: Created: latency-svc-cqlhf
+Jun  4 17:13:01.720: INFO: Got endpoints: latency-svc-25twc [723.844496ms]
+Jun  4 17:13:01.735: INFO: Created: latency-svc-2674z
+Jun  4 17:13:01.797: INFO: Got endpoints: latency-svc-dblgd [777.495617ms]
+Jun  4 17:13:01.817: INFO: Created: latency-svc-7m6z6
+Jun  4 17:13:01.820: INFO: Got endpoints: latency-svc-7qb28 [723.456339ms]
+Jun  4 17:13:01.896: INFO: Got endpoints: latency-svc-5gvbp [699.918305ms]
+Jun  4 17:13:01.912: INFO: Created: latency-svc-zf4hq
+Jun  4 17:13:01.920: INFO: Got endpoints: latency-svc-jc6ck [723.555824ms]
+Jun  4 17:13:01.922: INFO: Created: latency-svc-8wh7v
+Jun  4 17:13:01.941: INFO: Created: latency-svc-cf4tv
+Jun  4 17:13:01.969: INFO: Got endpoints: latency-svc-7q5v6 [749.948823ms]
+Jun  4 17:13:02.008: INFO: Created: latency-svc-ws7fn
+Jun  4 17:13:02.018: INFO: Got endpoints: latency-svc-gd6wq [747.682423ms]
+Jun  4 17:13:02.032: INFO: Created: latency-svc-kqh6f
+Jun  4 17:13:02.068: INFO: Got endpoints: latency-svc-75pmp [750.205352ms]
+Jun  4 17:13:02.142: INFO: Got endpoints: latency-svc-tnvtm [745.41406ms]
+Jun  4 17:13:02.142: INFO: Created: latency-svc-wtdq7
+Jun  4 17:13:02.157: INFO: Created: latency-svc-p2h2j
+Jun  4 17:13:02.169: INFO: Got endpoints: latency-svc-nrf4k [742.298478ms]
+Jun  4 17:13:02.197: INFO: Created: latency-svc-882mw
+Jun  4 17:13:02.218: INFO: Got endpoints: latency-svc-v5d2v [721.138467ms]
+Jun  4 17:13:02.296: INFO: Got endpoints: latency-svc-82f95 [699.837655ms]
+Jun  4 17:13:02.297: INFO: Created: latency-svc-8gfxc
+Jun  4 17:13:02.308: INFO: Created: latency-svc-dk7mb
+Jun  4 17:13:02.318: INFO: Got endpoints: latency-svc-prs8g [721.690538ms]
+Jun  4 17:13:02.330: INFO: Created: latency-svc-2zsxs
+Jun  4 17:13:02.367: INFO: Got endpoints: latency-svc-nq2ln [748.074438ms]
+Jun  4 17:13:02.379: INFO: Created: latency-svc-4hld6
+Jun  4 17:13:02.418: INFO: Got endpoints: latency-svc-cqlhf [750.500791ms]
+Jun  4 17:13:02.431: INFO: Created: latency-svc-v7s95
+Jun  4 17:13:02.481: INFO: Got endpoints: latency-svc-2674z [760.913986ms]
+Jun  4 17:13:02.496: INFO: Created: latency-svc-p5ltr
+Jun  4 17:13:02.518: INFO: Got endpoints: latency-svc-7m6z6 [720.586997ms]
+Jun  4 17:13:02.547: INFO: Created: latency-svc-r8crx
+Jun  4 17:13:02.567: INFO: Got endpoints: latency-svc-zf4hq [747.175125ms]
+Jun  4 17:13:02.596: INFO: Created: latency-svc-wbtsx
+Jun  4 17:13:02.619: INFO: Got endpoints: latency-svc-8wh7v [722.445617ms]
+Jun  4 17:13:02.637: INFO: Created: latency-svc-45cb6
+Jun  4 17:13:02.668: INFO: Got endpoints: latency-svc-cf4tv [747.742054ms]
+Jun  4 17:13:02.696: INFO: Created: latency-svc-8x9vr
+Jun  4 17:13:02.718: INFO: Got endpoints: latency-svc-ws7fn [748.720311ms]
+Jun  4 17:13:02.796: INFO: Got endpoints: latency-svc-kqh6f [777.713381ms]
+Jun  4 17:13:02.810: INFO: Created: latency-svc-7lg45
+Jun  4 17:13:02.810: INFO: Created: latency-svc-k7ccc
+Jun  4 17:13:02.816: INFO: Got endpoints: latency-svc-wtdq7 [748.445812ms]
+Jun  4 17:13:02.830: INFO: Created: latency-svc-hp5jv
+Jun  4 17:13:02.868: INFO: Got endpoints: latency-svc-p2h2j [726.091408ms]
+Jun  4 17:13:02.880: INFO: Created: latency-svc-nl98g
+Jun  4 17:13:02.917: INFO: Got endpoints: latency-svc-882mw [747.982678ms]
+Jun  4 17:13:02.996: INFO: Got endpoints: latency-svc-8gfxc [778.399806ms]
+Jun  4 17:13:02.996: INFO: Created: latency-svc-4r5d4
+Jun  4 17:13:03.009: INFO: Created: latency-svc-vncks
+Jun  4 17:13:03.020: INFO: Got endpoints: latency-svc-dk7mb [723.47104ms]
+Jun  4 17:13:03.040: INFO: Created: latency-svc-tdxww
+Jun  4 17:13:03.070: INFO: Got endpoints: latency-svc-2zsxs [752.120245ms]
+Jun  4 17:13:03.115: INFO: Created: latency-svc-4gg6g
+Jun  4 17:13:03.196: INFO: Got endpoints: latency-svc-v7s95 [777.935976ms]
+Jun  4 17:13:03.196: INFO: Got endpoints: latency-svc-4hld6 [828.997092ms]
+Jun  4 17:13:03.210: INFO: Created: latency-svc-w7gtd
+Jun  4 17:13:03.215: INFO: Created: latency-svc-dzfhb
+Jun  4 17:13:03.218: INFO: Got endpoints: latency-svc-p5ltr [736.708691ms]
+Jun  4 17:13:03.296: INFO: Got endpoints: latency-svc-r8crx [778.321832ms]
+Jun  4 17:13:03.296: INFO: Created: latency-svc-fw7jh
+Jun  4 17:13:03.313: INFO: Created: latency-svc-s7xhd
+Jun  4 17:13:03.317: INFO: Got endpoints: latency-svc-wbtsx [750.010714ms]
+Jun  4 17:13:03.328: INFO: Created: latency-svc-pj5nr
+Jun  4 17:13:03.369: INFO: Got endpoints: latency-svc-45cb6 [750.028117ms]
+Jun  4 17:13:03.380: INFO: Created: latency-svc-2scf2
+Jun  4 17:13:03.418: INFO: Got endpoints: latency-svc-8x9vr [749.529789ms]
+Jun  4 17:13:03.430: INFO: Created: latency-svc-4fzhn
+Jun  4 17:13:03.469: INFO: Got endpoints: latency-svc-7lg45 [750.49447ms]
+Jun  4 17:13:03.481: INFO: Created: latency-svc-4sxnn
+Jun  4 17:13:03.517: INFO: Got endpoints: latency-svc-k7ccc [721.09838ms]
+Jun  4 17:13:03.529: INFO: Created: latency-svc-v7ttn
+Jun  4 17:13:03.568: INFO: Got endpoints: latency-svc-hp5jv [751.082479ms]
+Jun  4 17:13:03.581: INFO: Created: latency-svc-t6g2f
+Jun  4 17:13:03.621: INFO: Got endpoints: latency-svc-nl98g [753.407522ms]
+Jun  4 17:13:03.642: INFO: Created: latency-svc-rxc7d
+Jun  4 17:13:03.669: INFO: Got endpoints: latency-svc-4r5d4 [752.024187ms]
+Jun  4 17:13:03.684: INFO: Created: latency-svc-lmv82
+Jun  4 17:13:03.719: INFO: Got endpoints: latency-svc-vncks [722.68367ms]
+Jun  4 17:13:03.733: INFO: Created: latency-svc-hvk9v
+Jun  4 17:13:03.796: INFO: Got endpoints: latency-svc-tdxww [776.024729ms]
+Jun  4 17:13:03.811: INFO: Created: latency-svc-jq66v
+Jun  4 17:13:03.896: INFO: Got endpoints: latency-svc-w7gtd [699.89933ms]
+Jun  4 17:13:03.896: INFO: Got endpoints: latency-svc-4gg6g [825.804ms]
+Jun  4 17:13:03.912: INFO: Created: latency-svc-4v89w
+Jun  4 17:13:03.914: INFO: Created: latency-svc-z6xqk
+Jun  4 17:13:03.916: INFO: Got endpoints: latency-svc-dzfhb [719.9339ms]
+Jun  4 17:13:03.996: INFO: Created: latency-svc-lt84k
+Jun  4 17:13:03.996: INFO: Got endpoints: latency-svc-fw7jh [778.044223ms]
+Jun  4 17:13:04.096: INFO: Created: latency-svc-w7cwk
+Jun  4 17:13:04.096: INFO: Got endpoints: latency-svc-pj5nr [778.66838ms]
+Jun  4 17:13:04.097: INFO: Got endpoints: latency-svc-s7xhd [800.06767ms]
+Jun  4 17:13:04.197: INFO: Created: latency-svc-4khk7
+Jun  4 17:13:04.197: INFO: Created: latency-svc-d57s4
+Jun  4 17:13:04.197: INFO: Got endpoints: latency-svc-4fzhn [779.188723ms]
+Jun  4 17:13:04.197: INFO: Got endpoints: latency-svc-2scf2 [828.031818ms]
+Jun  4 17:13:04.210: INFO: Created: latency-svc-7lkwf
+Jun  4 17:13:04.222: INFO: Got endpoints: latency-svc-4sxnn [753.09567ms]
+Jun  4 17:13:04.222: INFO: Created: latency-svc-pg5wr
+Jun  4 17:13:04.296: INFO: Created: latency-svc-7hjsr
+Jun  4 17:13:04.296: INFO: Got endpoints: latency-svc-v7ttn [778.87383ms]
+Jun  4 17:13:04.309: INFO: Created: latency-svc-2hht6
+Jun  4 17:13:04.316: INFO: Got endpoints: latency-svc-t6g2f [748.026395ms]
+Jun  4 17:13:04.329: INFO: Created: latency-svc-fd7kz
+Jun  4 17:13:04.396: INFO: Got endpoints: latency-svc-rxc7d [774.742523ms]
+Jun  4 17:13:04.410: INFO: Created: latency-svc-s9m4c
+Jun  4 17:13:04.417: INFO: Got endpoints: latency-svc-lmv82 [747.58339ms]
+Jun  4 17:13:04.432: INFO: Created: latency-svc-vzs6c
+Jun  4 17:13:04.496: INFO: Got endpoints: latency-svc-hvk9v [776.965194ms]
+Jun  4 17:13:04.509: INFO: Created: latency-svc-gs6kq
+Jun  4 17:13:04.518: INFO: Got endpoints: latency-svc-jq66v [721.428143ms]
+Jun  4 17:13:04.536: INFO: Created: latency-svc-tnsfb
+Jun  4 17:13:04.570: INFO: Got endpoints: latency-svc-4v89w [673.349888ms]
+Jun  4 17:13:04.581: INFO: Created: latency-svc-grbm7
+Jun  4 17:13:04.621: INFO: Got endpoints: latency-svc-z6xqk [724.577745ms]
+Jun  4 17:13:04.641: INFO: Created: latency-svc-q8lqk
+Jun  4 17:13:04.669: INFO: Got endpoints: latency-svc-lt84k [751.933026ms]
+Jun  4 17:13:04.685: INFO: Created: latency-svc-bxq9t
+Jun  4 17:13:04.720: INFO: Got endpoints: latency-svc-w7cwk [723.945053ms]
+Jun  4 17:13:04.742: INFO: Created: latency-svc-q6sgn
+Jun  4 17:13:04.769: INFO: Got endpoints: latency-svc-4khk7 [672.484942ms]
+Jun  4 17:13:04.785: INFO: Created: latency-svc-cp8l2
+Jun  4 17:13:04.821: INFO: Got endpoints: latency-svc-d57s4 [723.928866ms]
+Jun  4 17:13:04.833: INFO: Created: latency-svc-5kqjq
+Jun  4 17:13:04.896: INFO: Got endpoints: latency-svc-7lkwf [698.738777ms]
+Jun  4 17:13:04.908: INFO: Created: latency-svc-ptcfp
+Jun  4 17:13:04.997: INFO: Got endpoints: latency-svc-7hjsr [774.177239ms]
+Jun  4 17:13:04.997: INFO: Got endpoints: latency-svc-pg5wr [799.369342ms]
+Jun  4 17:13:05.098: INFO: Created: latency-svc-tmjtt
+Jun  4 17:13:05.098: INFO: Created: latency-svc-vgsnw
+Jun  4 17:13:05.098: INFO: Got endpoints: latency-svc-fd7kz [781.308981ms]
+Jun  4 17:13:05.098: INFO: Got endpoints: latency-svc-2hht6 [801.227588ms]
+Jun  4 17:13:05.114: INFO: Created: latency-svc-dgml7
+Jun  4 17:13:05.119: INFO: Got endpoints: latency-svc-s9m4c [722.762135ms]
+Jun  4 17:13:05.122: INFO: Created: latency-svc-wqxrr
+Jun  4 17:13:05.135: INFO: Created: latency-svc-h47jp
+Jun  4 17:13:05.169: INFO: Got endpoints: latency-svc-vzs6c [751.719724ms]
+Jun  4 17:13:05.198: INFO: Created: latency-svc-dfk7h
+Jun  4 17:13:05.222: INFO: Got endpoints: latency-svc-gs6kq [725.902317ms]
+Jun  4 17:13:05.297: INFO: Created: latency-svc-5t68r
+Jun  4 17:13:05.297: INFO: Got endpoints: latency-svc-tnsfb [779.303645ms]
+Jun  4 17:13:05.315: INFO: Created: latency-svc-xbsbn
+Jun  4 17:13:05.333: INFO: Got endpoints: latency-svc-grbm7 [762.97835ms]
+Jun  4 17:13:05.400: INFO: Got endpoints: latency-svc-q8lqk [778.579004ms]
+Jun  4 17:13:05.400: INFO: Created: latency-svc-7z2dw
+Jun  4 17:13:05.436: INFO: Created: latency-svc-mcr2n
+Jun  4 17:13:05.496: INFO: Got endpoints: latency-svc-q6sgn [775.969797ms]
+Jun  4 17:13:05.497: INFO: Got endpoints: latency-svc-bxq9t [827.791317ms]
+Jun  4 17:13:05.512: INFO: Created: latency-svc-rrzcj
+Jun  4 17:13:05.518: INFO: Got endpoints: latency-svc-cp8l2 [749.007287ms]
+Jun  4 17:13:05.596: INFO: Created: latency-svc-6rhkh
+Jun  4 17:13:05.596: INFO: Got endpoints: latency-svc-5kqjq [775.639701ms]
+Jun  4 17:13:05.609: INFO: Created: latency-svc-c8mwv
+Jun  4 17:13:05.615: INFO: Created: latency-svc-f4sp8
+Jun  4 17:13:05.618: INFO: Got endpoints: latency-svc-ptcfp [722.129679ms]
+Jun  4 17:13:05.631: INFO: Created: latency-svc-n9l7h
+Jun  4 17:13:05.669: INFO: Got endpoints: latency-svc-vgsnw [672.748087ms]
+Jun  4 17:13:05.732: INFO: Created: latency-svc-hq8vd
+Jun  4 17:13:05.732: INFO: Got endpoints: latency-svc-tmjtt [735.364313ms]
+Jun  4 17:13:05.796: INFO: Created: latency-svc-b6g4q
+Jun  4 17:13:05.796: INFO: Got endpoints: latency-svc-dgml7 [698.166672ms]
+Jun  4 17:13:05.809: INFO: Created: latency-svc-2j5b8
+Jun  4 17:13:05.819: INFO: Got endpoints: latency-svc-wqxrr [721.059842ms]
+Jun  4 17:13:05.839: INFO: Created: latency-svc-dmchk
+Jun  4 17:13:05.870: INFO: Got endpoints: latency-svc-h47jp [750.955685ms]
+Jun  4 17:13:06.007: INFO: Created: latency-svc-rwsrg
+Jun  4 17:13:06.007: INFO: Got endpoints: latency-svc-5t68r [784.824531ms]
+Jun  4 17:13:06.007: INFO: Got endpoints: latency-svc-dfk7h [838.371427ms]
+Jun  4 17:13:06.026: INFO: Got endpoints: latency-svc-xbsbn [728.869727ms]
+Jun  4 17:13:06.036: INFO: Created: latency-svc-ckfhk
+Jun  4 17:13:06.048: INFO: Created: latency-svc-lfvfp
+Jun  4 17:13:06.056: INFO: Created: latency-svc-4jdwp
+Jun  4 17:13:06.067: INFO: Got endpoints: latency-svc-7z2dw [727.849925ms]
+Jun  4 17:13:06.107: INFO: Created: latency-svc-wh9f8
+Jun  4 17:13:06.196: INFO: Got endpoints: latency-svc-rrzcj [699.293357ms]
+Jun  4 17:13:06.196: INFO: Got endpoints: latency-svc-mcr2n [795.098978ms]
+Jun  4 17:13:06.212: INFO: Created: latency-svc-7w8xv
+Jun  4 17:13:06.218: INFO: Got endpoints: latency-svc-6rhkh [721.200203ms]
+Jun  4 17:13:06.220: INFO: Created: latency-svc-6cwsj
+Jun  4 17:13:06.270: INFO: Got endpoints: latency-svc-c8mwv [751.896923ms]
+Jun  4 17:13:06.318: INFO: Got endpoints: latency-svc-f4sp8 [721.208895ms]
+Jun  4 17:13:06.368: INFO: Got endpoints: latency-svc-n9l7h [749.597374ms]
+Jun  4 17:13:06.418: INFO: Got endpoints: latency-svc-hq8vd [748.802704ms]
+Jun  4 17:13:06.467: INFO: Got endpoints: latency-svc-b6g4q [735.111648ms]
+Jun  4 17:13:06.519: INFO: Got endpoints: latency-svc-2j5b8 [722.87363ms]
+Jun  4 17:13:06.568: INFO: Got endpoints: latency-svc-dmchk [748.749162ms]
+Jun  4 17:13:06.625: INFO: Got endpoints: latency-svc-rwsrg [754.89527ms]
+Jun  4 17:13:06.668: INFO: Got endpoints: latency-svc-ckfhk [661.185818ms]
+Jun  4 17:13:06.719: INFO: Got endpoints: latency-svc-lfvfp [711.909048ms]
+Jun  4 17:13:06.771: INFO: Got endpoints: latency-svc-4jdwp [744.85775ms]
+Jun  4 17:13:06.821: INFO: Got endpoints: latency-svc-wh9f8 [754.0131ms]
+Jun  4 17:13:06.873: INFO: Got endpoints: latency-svc-7w8xv [676.967433ms]
+Jun  4 17:13:06.920: INFO: Got endpoints: latency-svc-6cwsj [723.680472ms]
+Jun  4 17:13:06.920: INFO: Latencies: [98.129157ms 99.690063ms 99.703472ms 109.156417ms 118.664207ms 122.787116ms 127.387319ms 142.726753ms 175.736654ms 176.026979ms 176.543851ms 179.295665ms 179.972727ms 182.890719ms 186.042876ms 186.919102ms 197.572551ms 197.674122ms 199.214084ms 199.367629ms 199.60847ms 199.636894ms 199.840065ms 199.897283ms 199.972493ms 200.271426ms 200.667521ms 201.248884ms 201.412986ms 201.488269ms 202.381616ms 204.548451ms 223.142422ms 233.279723ms 267.921685ms 268.46574ms 268.590237ms 269.184281ms 276.020629ms 276.849308ms 277.04446ms 277.053083ms 277.343757ms 323.948606ms 368.433216ms 420.697659ms 474.427253ms 499.366999ms 587.231314ms 605.209807ms 621.805015ms 622.993851ms 645.355783ms 661.185818ms 671.292449ms 671.713715ms 671.94397ms 672.484942ms 672.748087ms 673.349888ms 676.967433ms 694.727855ms 698.166672ms 698.738777ms 699.293357ms 699.464102ms 699.535314ms 699.669814ms 699.701999ms 699.837655ms 699.89933ms 699.918305ms 699.942228ms 711.909048ms 718.518454ms 719.9339ms 720.586997ms 721.059842ms 721.09838ms 721.138467ms 721.200203ms 721.208895ms 721.428143ms 721.690538ms 722.129679ms 722.445617ms 722.68367ms 722.729431ms 722.762135ms 722.87363ms 722.950891ms 723.170084ms 723.456339ms 723.47104ms 723.555824ms 723.680472ms 723.767887ms 723.844496ms 723.928866ms 723.945053ms 724.577745ms 725.340197ms 725.902317ms 726.091408ms 727.849925ms 728.869727ms 735.111648ms 735.364313ms 736.708691ms 742.298478ms 744.084897ms 744.85775ms 745.41406ms 745.885263ms 746.76468ms 746.987304ms 747.175125ms 747.58339ms 747.682423ms 747.742054ms 747.982678ms 748.026395ms 748.074438ms 748.445812ms 748.545014ms 748.720311ms 748.749162ms 748.802704ms 749.007287ms 749.529789ms 749.597374ms 749.948823ms 750.010714ms 750.028117ms 750.201192ms 750.205352ms 750.49447ms 750.500791ms 750.621839ms 750.955685ms 751.082479ms 751.719724ms 751.839173ms 751.896923ms 751.933026ms 752.024187ms 752.120245ms 753.09567ms 753.407522ms 754.0131ms 754.89527ms 756.744104ms 757.373279ms 760.913986ms 762.97835ms 771.125524ms 771.161842ms 774.177239ms 774.742523ms 775.266644ms 775.336172ms 775.555918ms 775.639701ms 775.969797ms 776.024729ms 776.533056ms 776.965194ms 777.495617ms 777.713381ms 777.935976ms 778.044223ms 778.321832ms 778.399806ms 778.579004ms 778.66838ms 778.87383ms 779.188723ms 779.303645ms 780.083566ms 781.308981ms 781.747678ms 784.824531ms 795.098978ms 797.884962ms 799.369342ms 799.435444ms 799.739063ms 799.897147ms 800.06767ms 800.381174ms 801.227588ms 825.804ms 827.791317ms 828.031818ms 828.562353ms 828.997092ms 831.058335ms 833.882538ms 838.371427ms 877.963287ms]
+Jun  4 17:13:06.921: INFO: 50 %ile: 724.577745ms
+Jun  4 17:13:06.921: INFO: 90 %ile: 781.747678ms
+Jun  4 17:13:06.921: INFO: 99 %ile: 838.371427ms
+Jun  4 17:13:06.921: INFO: Total sample count: 200
+[AfterEach] [sig-network] Service endpoints latency
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:13:06.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svc-latency-5530" for this suite.
+Jun  4 17:13:23.005: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:13:23.541: INFO: namespace svc-latency-5530 deletion completed in 16.605625955s
+
+• [SLOW TEST:28.512 seconds]
+[sig-network] Service endpoints latency
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should not be very high  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:13:23.541: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  4 17:13:28.510: INFO: Successfully updated pod "labelsupdate0f25227e-86ec-11e9-a2b6-96b18e3e6fac"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:13:30.713: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-5474" for this suite.
+Jun  4 17:13:52.737: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:13:53.300: INFO: namespace projected-5474 deletion completed in 22.581591406s
+
+• [SLOW TEST:29.759 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:13:53.301: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods
+STEP: Gathering metrics
+W0604 17:14:33.450966      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  4 17:14:33.451: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:14:33.451: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-223" for this suite.
+Jun  4 17:14:39.553: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:14:40.182: INFO: namespace gc-223 deletion completed in 6.648675463s
+
+• [SLOW TEST:46.882 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+S
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:14:40.182: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-http in namespace container-probe-3416
+Jun  4 17:14:44.398: INFO: Started pod liveness-http in namespace container-probe-3416
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  4 17:14:44.404: INFO: Initial restart count of pod liveness-http is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:18:45.604: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-3416" for this suite.
+Jun  4 17:19:05.834: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:19:06.616: INFO: namespace container-probe-3416 deletion completed in 21.000182182s
+
+• [SLOW TEST:266.434 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:19:06.617: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating configMap with name configmap-test-volume-map-dba57a75-86ec-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume configMaps
+Jun  4 17:19:06.727: INFO: Waiting up to 5m0s for pod "pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac" in namespace "configmap-3911" to be "success or failure"
+Jun  4 17:19:06.740: INFO: Pod "pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 13.307834ms
+Jun  4 17:19:08.752: INFO: Pod "pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.024720746s
+Jun  4 17:19:10.764: INFO: Pod "pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.036675046s
+STEP: Saw pod success
+Jun  4 17:19:10.764: INFO: Pod "pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:19:10.773: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac container configmap-volume-test: 
+STEP: delete the pod
+Jun  4 17:19:11.202: INFO: Waiting for pod pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:19:11.219: INFO: Pod pod-configmaps-dba7d2cd-86ec-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:19:11.219: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "configmap-3911" for this suite.
+Jun  4 17:19:19.265: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:19:20.415: INFO: namespace configmap-3911 deletion completed in 9.182485082s
+
+• [SLOW TEST:13.798 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:32
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:19:20.415: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:19:20.862: INFO: Creating simple daemon set daemon-set
+STEP: Check that daemon pods launch on every node of the cluster.
+Jun  4 17:19:20.994: INFO: Number of nodes with available pods: 0
+Jun  4 17:19:20.994: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:22.014: INFO: Number of nodes with available pods: 0
+Jun  4 17:19:22.014: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:23.304: INFO: Number of nodes with available pods: 2
+Jun  4 17:19:23.305: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:24.032: INFO: Number of nodes with available pods: 2
+Jun  4 17:19:24.032: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:25.055: INFO: Number of nodes with available pods: 3
+Jun  4 17:19:25.056: INFO: Number of running nodes: 3, number of available pods: 3
+STEP: Update daemon pods image.
+STEP: Check that daemon pods images are updated.
+Jun  4 17:19:25.159: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:25.159: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:25.159: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:26.185: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:26.185: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:26.185: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:26.185: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:27.214: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:27.214: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:27.214: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:27.214: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:28.204: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:28.204: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:28.204: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:28.204: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:29.181: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:29.181: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:29.181: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:29.181: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:30.303: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:30.303: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:30.303: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:30.303: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:31.180: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:31.180: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:31.180: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:31.180: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:32.223: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:32.223: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:32.223: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:32.223: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:33.403: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:33.403: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:33.403: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:33.403: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:34.182: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:34.182: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:34.182: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:34.182: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:35.208: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:35.208: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:35.208: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:35.208: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:36.197: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:36.197: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:36.197: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:36.197: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:37.210: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:37.210: INFO: Wrong image for pod: daemon-set-tqdxl. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:37.210: INFO: Pod daemon-set-tqdxl is not available
+Jun  4 17:19:37.210: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:38.241: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:38.241: INFO: Pod daemon-set-mbbfg is not available
+Jun  4 17:19:38.241: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:39.187: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:39.187: INFO: Pod daemon-set-mbbfg is not available
+Jun  4 17:19:39.187: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:40.190: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:40.190: INFO: Pod daemon-set-mbbfg is not available
+Jun  4 17:19:40.190: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:41.189: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:41.189: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:42.191: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:42.191: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:42.191: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:43.196: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:43.196: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:43.196: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:44.186: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:44.186: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:44.186: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:45.201: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:45.201: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:45.201: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:46.181: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:46.181: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:46.181: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:47.243: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:47.243: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:47.243: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:48.192: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:48.192: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:48.192: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:49.184: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:49.184: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:49.184: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:50.185: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:50.185: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:50.185: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:51.185: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:51.185: INFO: Wrong image for pod: daemon-set-wx8v5. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:51.185: INFO: Pod daemon-set-wx8v5 is not available
+Jun  4 17:19:52.184: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:52.184: INFO: Pod daemon-set-rt2v2 is not available
+Jun  4 17:19:53.182: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:53.182: INFO: Pod daemon-set-rt2v2 is not available
+Jun  4 17:19:54.185: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:54.185: INFO: Pod daemon-set-rt2v2 is not available
+Jun  4 17:19:55.180: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:56.199: INFO: Wrong image for pod: daemon-set-j5fvp. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: docker.io/library/nginx:1.14-alpine.
+Jun  4 17:19:56.199: INFO: Pod daemon-set-j5fvp is not available
+Jun  4 17:19:57.181: INFO: Pod daemon-set-qlww7 is not available
+STEP: Check that daemon pods are still running on every node of the cluster.
+Jun  4 17:19:57.269: INFO: Number of nodes with available pods: 2
+Jun  4 17:19:57.269: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:58.505: INFO: Number of nodes with available pods: 2
+Jun  4 17:19:58.505: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:19:59.308: INFO: Number of nodes with available pods: 2
+Jun  4 17:19:59.308: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:20:00.316: INFO: Number of nodes with available pods: 2
+Jun  4 17:20:00.316: INFO: Node ip-172-31-11-48.eu-central-1.compute.internal is running more than one daemon pod
+Jun  4 17:20:01.291: INFO: Number of nodes with available pods: 3
+Jun  4 17:20:01.291: INFO: Number of running nodes: 3, number of available pods: 3
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-3401, will wait for the garbage collector to delete the pods
+Jun  4 17:20:01.734: INFO: Deleting DaemonSet.extensions daemon-set took: 76.717168ms
+Jun  4 17:20:02.335: INFO: Terminating DaemonSet.extensions daemon-set pods took: 601.123913ms
+Jun  4 17:20:05.146: INFO: Number of nodes with available pods: 0
+Jun  4 17:20:05.146: INFO: Number of running nodes: 0, number of available pods: 0
+Jun  4 17:20:05.154: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/daemonsets-3401/daemonsets","resourceVersion":"33812"},"items":null}
+
+Jun  4 17:20:05.161: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-3401/pods","resourceVersion":"33812"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:20:05.237: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "daemonsets-3401" for this suite.
+Jun  4 17:20:13.621: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:20:14.733: INFO: namespace daemonsets-3401 deletion completed in 9.477159614s
+
+• [SLOW TEST:54.318 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should update pod when spec was updated and update strategy is RollingUpdate [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
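+
+The rolling update traced above can be reproduced outside the suite with a minimal DaemonSet sketch. The name `daemon-set`, the namespace `daemonsets-3401`, and both images are taken from the log; the container name `app` and the selector label are illustrative assumptions, and the namespace is assumed to already exist:
+
+```sh
+# Illustrative sketch, not the suite's own manifest: the container name "app"
+# and the label "app: daemon-set" are assumptions.
+kubectl -n daemonsets-3401 create -f - <<'EOF'
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: daemon-set
+spec:
+  selector:
+    matchLabels: {app: daemon-set}
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels: {app: daemon-set}
+    spec:
+      containers:
+      - name: app
+        image: docker.io/library/nginx:1.14-alpine
+EOF
+# Switch the image; the controller replaces pods node by node, which is what
+# the repeated "Wrong image for pod" polling above is waiting out.
+kubectl -n daemonsets-3401 set image daemonset/daemon-set app=gcr.io/kubernetes-e2e-test-images/redis:1.0
+kubectl -n daemonsets-3401 rollout status daemonset/daemon-set
+```
+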
+SSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:20:14.733: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test override arguments
+Jun  4 17:20:14.846: INFO: Waiting up to 5m0s for pod "client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac" in namespace "containers-826" to be "success or failure"
+Jun  4 17:20:14.857: INFO: Pod "client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 11.335354ms
+Jun  4 17:20:16.867: INFO: Pod "client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.021004709s
+Jun  4 17:20:18.875: INFO: Pod "client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.02879946s
+STEP: Saw pod success
+Jun  4 17:20:18.875: INFO: Pod "client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:20:18.881: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:20:19.025: INFO: Waiting for pod client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:20:19.034: INFO: Pod client-containers-044120d0-86ed-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:20:19.034: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-826" for this suite.
+Jun  4 17:20:25.095: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:20:25.985: INFO: namespace containers-826 deletion completed in 6.933525847s
+
+• [SLOW TEST:11.252 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
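+
+The pod this test creates sets `args` on its container, which overrides the image's Docker CMD while leaving its ENTRYPOINT alone. A minimal sketch of the same idea (pod name, image, and arguments here are illustrative, not the suite's own):
+
+```sh
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: override-args-demo   # illustrative name
+spec:
+  restartPolicy: Never
+  containers:
+  - name: test-container
+    image: busybox:1.29      # illustrative image
+    # `args` replaces the image's Docker CMD; the ENTRYPOINT is untouched.
+    args: ["echo", "overridden arguments"]
+EOF
+kubectl logs override-args-demo   # prints "overridden arguments" once the pod succeeds
+```
+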
+SS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:20:25.985: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+Jun  4 17:20:34.832: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:34.855: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:36.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:36.863: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:38.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:39.003: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:40.857: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:40.866: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:42.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:42.896: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:44.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:44.862: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:46.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:47.013: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:48.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:48.866: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:50.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:50.925: INFO: Pod pod-with-poststart-exec-hook still exists
+Jun  4 17:20:52.856: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear
+Jun  4 17:20:52.867: INFO: Pod pod-with-poststart-exec-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:20:52.867: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-1342" for this suite.
+Jun  4 17:21:14.901: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:21:46.597: INFO: namespace container-lifecycle-hook-1342 deletion completed in 53.71928893s
+
+• [SLOW TEST:80.612 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute poststart exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
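+
+The pod under test declares a `lifecycle.postStart.exec` handler: the kubelet runs it right after the container is created, and a failing handler gets the container killed and restarted per its restartPolicy. A minimal sketch of that shape (pod name, image, and handler command are illustrative):
+
+```sh
+kubectl create -f - <<'EOF'
+apiVersion: v1
+kind: Pod
+metadata:
+  name: poststart-demo       # illustrative name
+spec:
+  containers:
+  - name: main
+    image: nginx:1.14-alpine # illustrative image
+    lifecycle:
+      postStart:
+        exec:
+          # Runs inside the container immediately after creation; if it
+          # exits non-zero the container is killed and restarted.
+          command: ["/bin/sh", "-c", "echo poststart ran > /tmp/poststart"]
+EOF
+```
+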
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:21:46.597: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace statefulset-6283
+[It] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Initializing watcher for selector baz=blah,foo=bar
+STEP: Creating stateful set ss in namespace statefulset-6283
+STEP: Waiting until all stateful set ss replicas will be running in namespace statefulset-6283
+Jun  4 17:21:47.312: INFO: Found 0 stateful pods, waiting for 1
+Jun  4 17:21:57.321: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod
+Jun  4 17:21:57.332: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 17:21:58.903: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 17:21:58.903: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 17:21:58.903: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 17:21:58.993: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true
+Jun  4 17:22:09.005: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 17:22:09.005: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 17:22:09.067: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999688s
+Jun  4 17:22:10.093: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.975478128s
+Jun  4 17:22:11.289: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.949583274s
+Jun  4 17:22:12.298: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.75280035s
+Jun  4 17:22:13.393: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.743920225s
+Jun  4 17:22:14.402: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.649149054s
+Jun  4 17:22:15.409: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.640036865s
+Jun  4 17:22:16.416: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.633308455s
+Jun  4 17:22:17.439: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.625663651s
+Jun  4 17:22:18.700: INFO: Verifying statefulset ss doesn't scale past 1 for another 603.311711ms
+STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace statefulset-6283
+Jun  4 17:22:19.707: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:22:20.494: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 17:22:20.495: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 17:22:20.495: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 17:22:20.503: INFO: Found 1 stateful pods, waiting for 3
+Jun  4 17:22:30.642: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 17:22:30.642: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true
+Jun  4 17:22:30.642: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true
+STEP: Verifying that stateful set ss was scaled up in order
+STEP: Scale down will halt with unhealthy stateful pod
+Jun  4 17:22:30.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-0 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 17:22:31.576: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 17:22:31.576: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 17:22:31.576: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 17:22:31.577: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-1 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 17:22:32.243: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 17:22:32.243: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 17:22:32.243: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 17:22:32.243: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+Jun  4 17:22:33.275: INFO: stderr: "+ mv -v /usr/share/nginx/html/index.html /tmp/\n"
+Jun  4 17:22:33.275: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+Jun  4 17:22:33.275: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+Jun  4 17:22:33.275: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 17:22:33.285: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 1
+Jun  4 17:22:43.394: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 17:22:43.394: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 17:22:43.394: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false
+Jun  4 17:22:43.604: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999634s
+Jun  4 17:22:44.663: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.892612866s
+Jun  4 17:22:45.687: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.832934701s
+Jun  4 17:22:46.701: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.809481315s
+Jun  4 17:22:47.707: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.795252874s
+Jun  4 17:22:48.713: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.789074625s
+Jun  4 17:22:49.720: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.782961504s
+Jun  4 17:22:50.727: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.776013662s
+Jun  4 17:22:51.965: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.769628031s
+Jun  4 17:22:56.141: INFO: Verifying statefulset ss doesn't scale past 3 for another 531.607684ms
+STEP: Scaling down stateful set ss to 0 replicas and waiting until none of the pods are running in namespace statefulset-6283
+Jun  4 17:22:57.151: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-0 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:22:57.992: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 17:22:57.992: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 17:22:57.992: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 17:22:57.992: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-1 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:22:58.794: INFO: stderr: "+ mv -v /tmp/index.html /usr/share/nginx/html/\n"
+Jun  4 17:22:58.794: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+Jun  4 17:22:58.794: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+Jun  4 17:22:58.794: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:22:59.167: INFO: rc: 1
+Jun  4 17:22:59.167: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server: 
+ []  0xc0031a03f0 exit status 1   true [0xc0022d8008 0xc0022d8020 0xc0022d8038] [0xc0022d8008 0xc0022d8020 0xc0022d8038] [0xc0022d8018 0xc0022d8030] [0x9bf9f0 0x9bf9f0] 0xc002bf4600 }:
+Command stdout:
+
+stderr:
+Error from server: 
+
+error:
+exit status 1
+
+Jun  4 17:23:09.168: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:23:09.473: INFO: rc: 1
+Jun  4 17:23:09.473: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6330 exit status 1   true [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e018 0xc001e7e040] [0x9bf9f0 0x9bf9f0] 0xc0026f6540 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:23:19.473: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:23:19.556: INFO: rc: 1
+Jun  4 17:23:19.556: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6810 exit status 1   true [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e098 0xc001e7e0d0] [0x9bf9f0 0x9bf9f0] 0xc0026f6b40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:23:29.556: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:23:29.682: INFO: rc: 1
+Jun  4 17:23:29.682: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0780 exit status 1   true [0xc0022d8040 0xc0022d8058 0xc0022d8070] [0xc0022d8040 0xc0022d8058 0xc0022d8070] [0xc0022d8050 0xc0022d8068] [0x9bf9f0 0x9bf9f0] 0xc002bf4b40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:23:39.682: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:23:40.193: INFO: rc: 1
+Jun  4 17:23:40.193: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6ba0 exit status 1   true [0xc001e7e0f8 0xc001e7e118 0xc001e7e148] [0xc001e7e0f8 0xc001e7e118 0xc001e7e148] [0xc001e7e108 0xc001e7e138] [0x9bf9f0 0x9bf9f0] 0xc0026f71a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:23:50.193: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:23:50.278: INFO: rc: 1
+Jun  4 17:23:50.278: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6f00 exit status 1   true [0xc001e7e178 0xc001e7e1c8 0xc001e7e200] [0xc001e7e178 0xc001e7e1c8 0xc001e7e200] [0xc001e7e1b8 0xc001e7e1e8] [0x9bf9f0 0x9bf9f0] 0xc0026f7800 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:00.278: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:00.429: INFO: rc: 1
+Jun  4 17:24:00.429: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0b40 exit status 1   true [0xc0022d8078 0xc0022d8090 0xc0022d80a8] [0xc0022d8078 0xc0022d8090 0xc0022d80a8] [0xc0022d8088 0xc0022d80a0] [0x9bf9f0 0x9bf9f0] 0xc002bf50e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:10.429: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:10.577: INFO: rc: 1
+Jun  4 17:24:10.577: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0ed0 exit status 1   true [0xc0022d80b0 0xc0022d80c8 0xc0022d80e0] [0xc0022d80b0 0xc0022d80c8 0xc0022d80e0] [0xc0022d80c0 0xc0022d80d8] [0x9bf9f0 0x9bf9f0] 0xc002bf5560 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:20.578: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:20.642: INFO: rc: 1
+Jun  4 17:24:20.642: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a19e0 exit status 1   true [0xc0022d80e8 0xc0022d8100 0xc0022d8118] [0xc0022d80e8 0xc0022d8100 0xc0022d8118] [0xc0022d80f8 0xc0022d8110] [0x9bf9f0 0x9bf9f0] 0xc0029d80c0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:30.642: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:30.720: INFO: rc: 1
+Jun  4 17:24:30.720: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a1dd0 exit status 1   true [0xc0022d8120 0xc0022d8138 0xc0022d8150] [0xc0022d8120 0xc0022d8138 0xc0022d8150] [0xc0022d8130 0xc0022d8148] [0x9bf9f0 0x9bf9f0] 0xc0029d8420 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:40.720: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:40.790: INFO: rc: 1
+Jun  4 17:24:40.790: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec7290 exit status 1   true [0xc001e7e220 0xc001e7e248 0xc001e7e270] [0xc001e7e220 0xc001e7e248 0xc001e7e270] [0xc001e7e238 0xc001e7e268] [0x9bf9f0 0x9bf9f0] 0xc001dce000 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:24:50.790: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:24:50.874: INFO: rc: 1
+Jun  4 17:24:50.874: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec7620 exit status 1   true [0xc001e7e280 0xc001e7e2d8 0xc001e7e310] [0xc001e7e280 0xc001e7e2d8 0xc001e7e310] [0xc001e7e2a8 0xc001e7e300] [0x9bf9f0 0x9bf9f0] 0xc001dce3c0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:00.875: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:01.069: INFO: rc: 1
+Jun  4 17:25:01.069: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec79b0 exit status 1   true [0xc001e7e330 0xc001e7e378 0xc001e7e3a0] [0xc001e7e330 0xc001e7e378 0xc001e7e3a0] [0xc001e7e358 0xc001e7e390] [0x9bf9f0 0x9bf9f0] 0xc001dce720 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:11.070: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:11.202: INFO: rc: 1
+Jun  4 17:25:11.202: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0360 exit status 1   true [0xc0022d8000 0xc0022d8018 0xc0022d8030] [0xc0022d8000 0xc0022d8018 0xc0022d8030] [0xc0022d8010 0xc0022d8028] [0x9bf9f0 0x9bf9f0] 0xc002bf4600 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:21.202: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:21.296: INFO: rc: 1
+Jun  4 17:25:21.296: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0720 exit status 1   true [0xc0022d8038 0xc0022d8050 0xc0022d8068] [0xc0022d8038 0xc0022d8050 0xc0022d8068] [0xc0022d8048 0xc0022d8060] [0x9bf9f0 0x9bf9f0] 0xc002bf4b40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:31.296: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:31.478: INFO: rc: 1
+Jun  4 17:25:31.478: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0ab0 exit status 1   true [0xc0022d8070 0xc0022d8088 0xc0022d80a0] [0xc0022d8070 0xc0022d8088 0xc0022d80a0] [0xc0022d8080 0xc0022d8098] [0x9bf9f0 0x9bf9f0] 0xc002bf50e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:41.478: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:41.674: INFO: rc: 1
+Jun  4 17:25:41.674: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0e70 exit status 1   true [0xc0022d80a8 0xc0022d80c0 0xc0022d80d8] [0xc0022d80a8 0xc0022d80c0 0xc0022d80d8] [0xc0022d80b8 0xc0022d80d0] [0x9bf9f0 0x9bf9f0] 0xc002bf5560 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:25:51.675: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:25:51.750: INFO: rc: 1
+Jun  4 17:25:51.750: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a1980 exit status 1   true [0xc0022d80e0 0xc0022d80f8 0xc0022d8110] [0xc0022d80e0 0xc0022d80f8 0xc0022d8110] [0xc0022d80f0 0xc0022d8108] [0x9bf9f0 0x9bf9f0] 0xc0026f61e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:01.750: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:01.877: INFO: rc: 1
+Jun  4 17:26:01.877: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a1d10 exit status 1   true [0xc0022d8118 0xc0022d8130 0xc0022d8148] [0xc0022d8118 0xc0022d8130 0xc0022d8148] [0xc0022d8128 0xc0022d8140] [0x9bf9f0 0x9bf9f0] 0xc0026f67e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:11.877: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:11.979: INFO: rc: 1
+Jun  4 17:26:11.980: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6510 exit status 1   true [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e018 0xc001e7e040] [0x9bf9f0 0x9bf9f0] 0xc0029d82a0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:21.980: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:22.097: INFO: rc: 1
+Jun  4 17:26:22.097: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6900 exit status 1   true [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e098 0xc001e7e0d0] [0x9bf9f0 0x9bf9f0] 0xc0029d8600 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:32.097: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:32.172: INFO: rc: 1
+Jun  4 17:26:32.172: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec6cc0 exit status 1   true [0xc001e7e0f8 0xc001e7e118 0xc001e7e148] [0xc001e7e0f8 0xc001e7e118 0xc001e7e148] [0xc001e7e108 0xc001e7e138] [0x9bf9f0 0x9bf9f0] 0xc0029d8a80 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:42.173: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:42.270: INFO: rc: 1
+Jun  4 17:26:42.270: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc002ec7080 exit status 1   true [0xc001e7e178 0xc001e7e1c8 0xc001e7e200] [0xc001e7e178 0xc001e7e1c8 0xc001e7e200] [0xc001e7e1b8 0xc001e7e1e8] [0x9bf9f0 0x9bf9f0] 0xc0029d9020 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:26:52.271: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:26:52.469: INFO: rc: 1
+Jun  4 17:26:52.469: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc001cca0f0 exit status 1   true [0xc0022d8150 0xc0022d8168 0xc0022d8180] [0xc0022d8150 0xc0022d8168 0xc0022d8180] [0xc0022d8160 0xc0022d8178] [0x9bf9f0 0x9bf9f0] 0xc0026f6e40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:02.469: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:02.700: INFO: rc: 1
+Jun  4 17:27:02.700: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0330 exit status 1   true [0xc0022d8008 0xc0022d8020 0xc0022d8038] [0xc0022d8008 0xc0022d8020 0xc0022d8038] [0xc0022d8018 0xc0022d8030] [0x9bf9f0 0x9bf9f0] 0xc002bf4600 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:12.700: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:12.872: INFO: rc: 1
+Jun  4 17:27:12.872: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc001cca5a0 exit status 1   true [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e000 0xc001e7e028 0xc001e7e050] [0xc001e7e018 0xc001e7e040] [0x9bf9f0 0x9bf9f0] 0xc0026f6540 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:22.872: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:22.967: INFO: rc: 1
+Jun  4 17:27:22.967: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0750 exit status 1   true [0xc0022d8040 0xc0022d8058 0xc0022d8070] [0xc0022d8040 0xc0022d8058 0xc0022d8070] [0xc0022d8050 0xc0022d8068] [0x9bf9f0 0x9bf9f0] 0xc002bf4b40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:32.968: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:33.095: INFO: rc: 1
+Jun  4 17:27:33.095: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0b10 exit status 1   true [0xc0022d8078 0xc0022d8090 0xc0022d80a8] [0xc0022d8078 0xc0022d8090 0xc0022d80a8] [0xc0022d8088 0xc0022d80a0] [0x9bf9f0 0x9bf9f0] 0xc002bf50e0 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:43.096: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:43.272: INFO: rc: 1
+Jun  4 17:27:43.273: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc0031a0ed0 exit status 1   true [0xc0022d80b0 0xc0022d80c8 0xc0022d80e0] [0xc0022d80b0 0xc0022d80c8 0xc0022d80e0] [0xc0022d80c0 0xc0022d80d8] [0x9bf9f0 0x9bf9f0] 0xc002bf5560 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:27:53.273: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:27:53.370: INFO: rc: 1
+Jun  4 17:27:53.370: INFO: Waiting 10s to retry failed RunHostCmd: error running &{/usr/local/bin/kubectl [kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true] []    Error from server (NotFound): pods "ss-2" not found
+ []  0xc001cca930 exit status 1   true [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e070 0xc001e7e0a0 0xc001e7e0e8] [0xc001e7e098 0xc001e7e0d0] [0x9bf9f0 0x9bf9f0] 0xc0026f6b40 }:
+Command stdout:
+
+stderr:
+Error from server (NotFound): pods "ss-2" not found
+
+error:
+exit status 1
+
+Jun  4 17:28:03.370: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-441229521 exec --namespace=statefulset-6283 ss-2 -- /bin/sh -x -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+Jun  4 17:28:03.494: INFO: rc: 1
+Jun  4 17:28:03.494: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: 
+Jun  4 17:28:03.494: INFO: Scaling statefulset ss to 0
+STEP: Verifying that stateful set ss was scaled down in reverse order
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+Jun  4 17:28:03.674: INFO: Deleting all statefulset in ns statefulset-6283
+Jun  4 17:28:03.679: INFO: Scaling statefulset ss to 0
+Jun  4 17:28:03.693: INFO: Waiting for statefulset status.replicas updated to 0
+Jun  4 17:28:03.703: INFO: Deleting statefulset ss
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:28:04.088: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "statefulset-6283" for this suite.
+Jun  4 17:28:10.116: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:28:10.909: INFO: namespace statefulset-6283 deletion completed in 6.81560298s
+
+• [SLOW TEST:384.312 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+    Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
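+
+The scaling checks above hinge on `podManagementPolicy: OrderedReady` (the StatefulSet default): pods are created as ss-0, ss-1, ss-2 in order, scaling halts while any pod is unready (which the test forces by `mv`-ing `index.html` out of the nginx web root so the readiness probe fails), and scale-down proceeds in reverse order. A minimal sketch with that shape; the names `ss` and `test` and the `baz=blah,foo=bar` labels mirror the log, while the image and probe are illustrative:
+
+```sh
+kubectl -n statefulset-6283 create -f - <<'EOF'
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: ss
+spec:
+  serviceName: test
+  replicas: 3
+  podManagementPolicy: OrderedReady   # the default; spelled out for emphasis
+  selector:
+    matchLabels: {baz: blah, foo: bar}
+  template:
+    metadata:
+      labels: {baz: blah, foo: bar}
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14-alpine      # illustrative image
+        readinessProbe:
+          # Moving index.html away (as the test's `mv` does) fails this probe
+          # and halts further ordered scaling until the pod is ready again.
+          httpGet: {path: /index.html, port: 80}
+EOF
+# With OrderedReady, this removes ss-2, then ss-1, then ss-0 in turn.
+kubectl -n statefulset-6283 scale statefulset/ss --replicas=0
+```
+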
+SSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] version v1
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:28:10.910: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node using proxy subresource  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:28:11.030: INFO: (0) /api/v1/nodes/ip-172-31-11-48.eu-central-1.compute.internal/proxy/logs/: 
+(log listing garbled in extraction: the proxy request returned the node's log directory as an HTML listing — alternatives.log, amazon/, … — whose markup was stripped, leaving repeated bare entries; the remaining proxy iterations and the start of the following [k8s.io] Docker Containers test are truncated here)
+>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test override command
+Jun  4 17:28:18.232: INFO: Waiting up to 5m0s for pod "client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac" in namespace "containers-9528" to be "success or failure"
+Jun  4 17:28:18.240: INFO: Pod "client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 8.13457ms
+Jun  4 17:28:20.248: INFO: Pod "client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01607275s
+Jun  4 17:28:22.276: INFO: Pod "client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.043646194s
+STEP: Saw pod success
+Jun  4 17:28:22.276: INFO: Pod "client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:28:22.284: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:28:22.694: INFO: Waiting for pod client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:28:22.720: INFO: Pod client-containers-24606264-86ee-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:28:22.720: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-9528" for this suite.
+Jun  4 17:28:28.746: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:28:29.294: INFO: namespace containers-9528 deletion completed in 6.567677894s
+
+• [SLOW TEST:11.193 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:28:29.295: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+Jun  4 17:28:29.487: INFO: Waiting up to 5m0s for pod "pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-8596" to be "success or failure"
+Jun  4 17:28:29.496: INFO: Pod "pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 9.303956ms
+Jun  4 17:28:31.528: INFO: Pod "pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.041816718s
+Jun  4 17:28:33.535: INFO: Pod "pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.047968468s
+STEP: Saw pod success
+Jun  4 17:28:33.535: INFO: Pod "pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:28:33.627: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:28:33.819: INFO: Waiting for pod pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:28:33.823: INFO: Pod pod-2b164ad9-86ee-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:28:33.823: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-8596" for this suite.
+Jun  4 17:28:39.875: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:28:40.223: INFO: namespace emptydir-8596 deletion completed in 6.393830754s
+
+• [SLOW TEST:10.929 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-storage] Projected secret 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:28:40.223: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name s-test-opt-del-318e9d0b-86ee-11e9-a2b6-96b18e3e6fac
+STEP: Creating secret with name s-test-opt-upd-318e9d43-86ee-11e9-a2b6-96b18e3e6fac
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-318e9d0b-86ee-11e9-a2b6-96b18e3e6fac
+STEP: Updating secret s-test-opt-upd-318e9d43-86ee-11e9-a2b6-96b18e3e6fac
+STEP: Creating secret with name s-test-opt-create-318e9d5b-86ee-11e9-a2b6-96b18e3e6fac
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:29:55.070: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-3802" for this suite.
+Jun  4 17:30:17.183: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:30:17.903: INFO: namespace projected-3802 deletion completed in 22.821152048s
+
+• [SLOW TEST:97.680 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:33
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:30:17.903: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0777 on node default medium
+Jun  4 17:30:18.289: INFO: Waiting up to 5m0s for pod "pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-9280" to be "success or failure"
+Jun  4 17:30:18.293: INFO: Pod "pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 3.881751ms
+Jun  4 17:30:20.324: INFO: Pod "pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.034841364s
+Jun  4 17:30:22.375: INFO: Pod "pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.085595862s
+STEP: Saw pod success
+Jun  4 17:30:22.375: INFO: Pod "pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:30:22.381: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:30:22.469: INFO: Waiting for pod pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:30:22.474: INFO: Pod pod-6bf0f06f-86ee-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:30:22.474: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-9280" for this suite.
+Jun  4 17:30:28.503: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:30:29.082: INFO: namespace emptydir-9280 deletion completed in 6.599039386s
+
+• [SLOW TEST:11.179 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:30:29.082: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-exec in namespace container-probe-2165
+Jun  4 17:30:33.302: INFO: Started pod liveness-exec in namespace container-probe-2165
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  4 17:30:33.307: INFO: Initial restart count of pod liveness-exec is 0
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:34:34.403: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-2165" for this suite.
+Jun  4 17:34:40.568: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:34:41.177: INFO: namespace container-probe-2165 deletion completed in 6.710794437s
+
+• [SLOW TEST:252.094 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:34:41.178: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod liveness-exec in namespace container-probe-6832
+Jun  4 17:34:45.294: INFO: Started pod liveness-exec in namespace container-probe-6832
+STEP: checking the pod's current state and verifying that restartCount is present
+Jun  4 17:34:45.299: INFO: Initial restart count of pod liveness-exec is 0
+Jun  4 17:35:33.678: INFO: Restart count of pod container-probe-6832/liveness-exec is now 1 (48.379129399s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:35:33.737: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-6832" for this suite.
+Jun  4 17:35:39.829: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:35:40.278: INFO: namespace container-probe-6832 deletion completed in 6.534825746s
+
+• [SLOW TEST:59.100 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:35:40.279: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:51
+[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:36:02.483: INFO: Container started at 2019-06-04 17:35:41 +0000 UTC, pod became ready at 2019-06-04 17:36:02 +0000 UTC
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:36:02.483: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-probe-5831" for this suite.
+Jun  4 17:36:24.569: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:36:25.280: INFO: namespace container-probe-5831 deletion completed in 22.788135228s
+
+• [SLOW TEST:45.001 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:36:25.281: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for all pods to be garbage collected
+STEP: Gathering metrics
+W0604 17:36:35.500218      15 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+Jun  4 17:36:35.500: INFO: For apiserver_request_total:
+For apiserver_request_latencies_summary:
+For apiserver_init_events_total:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:36:35.500: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "gc-6613" for this suite.
+Jun  4 17:36:41.526: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:36:42.373: INFO: namespace gc-6613 deletion completed in 6.867146966s
+
+• [SLOW TEST:17.092 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete pods created by rc when not orphaning [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:36:42.374: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:36:42.453: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "kubelet-test-9311" for this suite.
+Jun  4 17:36:48.486: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:36:48.912: INFO: namespace kubelet-test-9311 deletion completed in 6.45366964s
+
+• [SLOW TEST:6.538 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should be possible to delete [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:36:48.912: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun  4 17:36:48.975: INFO: Waiting up to 5m0s for pod "pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-1722" to be "success or failure"
+Jun  4 17:36:48.979: INFO: Pod "pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 3.93044ms
+Jun  4 17:36:51.068: INFO: Pod "pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.092643313s
+Jun  4 17:36:53.074: INFO: Pod "pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.098735274s
+STEP: Saw pod success
+Jun  4 17:36:53.074: INFO: Pod "pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:36:53.078: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:36:53.295: INFO: Waiting for pod pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:36:53.299: INFO: Pod pod-54cc14a0-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:36:53.299: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-1722" for this suite.
+Jun  4 17:36:59.323: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:36:59.794: INFO: namespace emptydir-1722 deletion completed in 6.48778857s
+
+• [SLOW TEST:10.882 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:36:59.795: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename replicaset
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:36:59.870: INFO: Creating ReplicaSet my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac
+Jun  4 17:36:59.880: INFO: Pod name my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac: Found 0 pods out of 1
+Jun  4 17:37:04.886: INFO: Pod name my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac: Found 1 pods out of 1
+Jun  4 17:37:04.886: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac" is running
+Jun  4 17:37:04.890: INFO: Pod "my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac-rtwzx" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-04 17:36:59 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-04 17:37:02 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-04 17:37:02 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-06-04 17:36:59 +0000 UTC Reason: Message:}])
+Jun  4 17:37:04.890: INFO: Trying to dial the pod
+Jun  4 17:37:10.226: INFO: Controller my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac: Got expected result from replica 1 [my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac-rtwzx]: "my-hostname-basic-5b4eb2cf-86ef-11e9-a2b6-96b18e3e6fac-rtwzx", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:37:10.226: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "replicaset-3291" for this suite.
+Jun  4 17:37:16.288: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:37:16.693: INFO: namespace replicaset-3291 deletion completed in 6.427924354s
+
+• [SLOW TEST:16.898 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:37:16.697: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir volume type on node default medium
+Jun  4 17:37:16.880: INFO: Waiting up to 5m0s for pod "pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-9951" to be "success or failure"
+Jun  4 17:37:16.886: INFO: Pod "pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.045038ms
+Jun  4 17:37:18.896: INFO: Pod "pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015151941s
+Jun  4 17:37:20.927: INFO: Pod "pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.046817557s
+STEP: Saw pod success
+Jun  4 17:37:20.927: INFO: Pod "pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:37:20.933: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:37:21.238: INFO: Waiting for pod pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:37:21.242: INFO: Pod pod-65709fff-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:37:21.242: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-9951" for this suite.
+Jun  4 17:37:27.372: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:37:27.991: INFO: namespace emptydir-9951 deletion completed in 6.723220798s
+
+• [SLOW TEST:11.294 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:37:27.992: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating pod
+Jun  4 17:37:32.282: INFO: Pod pod-hostip-6c199ab3-86ef-11e9-a2b6-96b18e3e6fac has hostIP: 172.31.9.156
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:37:32.282: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-2634" for this suite.
+Jun  4 17:37:54.313: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:37:54.517: INFO: namespace pods-2634 deletion completed in 22.225471232s
+
+• [SLOW TEST:26.526 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should get a host IP [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:37:54.518: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating the pod
+Jun  4 17:37:59.290: INFO: Successfully updated pod "annotationupdate7bf3fafe-86ef-11e9-a2b6-96b18e3e6fac"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:38:01.430: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "projected-8779" for this suite.
+Jun  4 17:38:23.509: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:38:24.000: INFO: namespace projected-8779 deletion completed in 22.511063946s
+
+• [SLOW TEST:29.483 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update annotations on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:38:24.000: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: getting the auto-created API token
+STEP: reading a file in the container
+Jun  4 17:38:28.975: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-6329 pod-service-account-8dea7ebc-86ef-11e9-a2b6-96b18e3e6fac -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/token'
+STEP: reading a file in the container
+Jun  4 17:38:29.856: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-6329 pod-service-account-8dea7ebc-86ef-11e9-a2b6-96b18e3e6fac -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/ca.crt'
+STEP: reading a file in the container
+Jun  4 17:38:30.581: INFO: Running '/usr/local/bin/kubectl exec --namespace=svcaccounts-6329 pod-service-account-8dea7ebc-86ef-11e9-a2b6-96b18e3e6fac -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace'
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:38:31.366: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "svcaccounts-6329" for this suite.
+Jun  4 17:38:37.584: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:38:38.277: INFO: namespace svcaccounts-6329 deletion completed in 6.748153914s
+
+• [SLOW TEST:14.277 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:38:38.278: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:135
+[It] should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying the pod is in kubernetes
+STEP: updating the pod
+Jun  4 17:38:42.988: INFO: Successfully updated pod "pod-update-95fc218e-86ef-11e9-a2b6-96b18e3e6fac"
+STEP: verifying the updated pod is in kubernetes
+Jun  4 17:38:42.999: INFO: Pod update OK
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:38:42.999: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "pods-9522" for this suite.
+Jun  4 17:39:05.029: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:39:05.496: INFO: namespace pods-9522 deletion completed in 22.485075875s
+
+• [SLOW TEST:27.218 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be updated [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:39:05.496: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating secret with name secret-test-a64a0d6d-86ef-11e9-a2b6-96b18e3e6fac
+STEP: Creating a pod to test consume secrets
+Jun  4 17:39:05.689: INFO: Waiting up to 5m0s for pod "pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac" in namespace "secrets-6089" to be "success or failure"
+Jun  4 17:39:05.767: INFO: Pod "pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 78.16338ms
+Jun  4 17:39:07.774: INFO: Pod "pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 2.085092595s
+Jun  4 17:39:09.782: INFO: Pod "pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.093055364s
+STEP: Saw pod success
+Jun  4 17:39:09.782: INFO: Pod "pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:39:09.789: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac container secret-volume-test: 
+STEP: delete the pod
+Jun  4 17:39:09.891: INFO: Waiting for pod pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:39:09.896: INFO: Pod pod-secrets-a64b6ada-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:39:09.896: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "secrets-6089" for this suite.
+Jun  4 17:39:15.917: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:39:16.399: INFO: namespace secrets-6089 deletion completed in 6.49766628s
+
+• [SLOW TEST:10.903 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:33
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:39:16.400: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test override all
+Jun  4 17:39:16.475: INFO: Waiting up to 5m0s for pod "client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac" in namespace "containers-6604" to be "success or failure"
+Jun  4 17:39:16.482: INFO: Pod "client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 6.469227ms
+Jun  4 17:39:18.571: INFO: Pod "client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.095757223s
+STEP: Saw pod success
+Jun  4 17:39:18.571: INFO: Pod "client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:39:18.576: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:39:18.612: INFO: Waiting for pod client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:39:18.617: INFO: Pod client-containers-acb5ca29-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:39:18.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "containers-6604" for this suite.
+Jun  4 17:39:24.677: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:39:25.784: INFO: namespace containers-6604 deletion completed in 7.161664476s
+
+• [SLOW TEST:9.384 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:39:25.784: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test downward API volume plugin
+Jun  4 17:39:25.887: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac" in namespace "downward-api-5449" to be "success or failure"
+Jun  4 17:39:25.966: INFO: Pod "downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 78.857519ms
+Jun  4 17:39:27.971: INFO: Pod "downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.083577772s
+STEP: Saw pod success
+Jun  4 17:39:27.971: INFO: Pod "downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:39:27.975: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac container client-container: 
+STEP: delete the pod
+Jun  4 17:39:28.136: INFO: Waiting for pod downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:39:28.144: INFO: Pod downwardapi-volume-b255d495-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:39:28.145: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "downward-api-5449" for this suite.
+Jun  4 17:39:34.168: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:39:34.610: INFO: namespace downward-api-5449 deletion completed in 6.459972594s
+
+• [SLOW TEST:8.826 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide podname only [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:39:34.610: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: create the pod with lifecycle hook
+STEP: delete the pod with lifecycle hook
+Jun  4 17:39:42.981: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:42.987: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:44.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:44.992: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:46.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:47.066: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:48.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:48.992: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:50.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:51.066: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:52.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:52.994: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:54.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:55.170: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:56.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:57.029: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:39:58.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:39:58.994: INFO: Pod pod-with-prestop-exec-hook still exists
+Jun  4 17:40:00.987: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear
+Jun  4 17:40:01.073: INFO: Pod pod-with-prestop-exec-hook no longer exists
+STEP: check prestop hook
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:40:01.189: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "container-lifecycle-hook-5921" for this suite.
+Jun  4 17:40:23.295: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:40:23.886: INFO: namespace container-lifecycle-hook-5921 deletion completed in 22.61471122s
+
+• [SLOW TEST:49.276 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:687
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute prestop exec hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:40:23.887: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating a pod to test emptydir 0666 on node default medium
+Jun  4 17:40:23.966: INFO: Waiting up to 5m0s for pod "pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac" in namespace "emptydir-4709" to be "success or failure"
+Jun  4 17:40:23.971: INFO: Pod "pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac": Phase="Pending", Reason="", readiness=false. Elapsed: 4.980006ms
+Jun  4 17:40:25.978: INFO: Pod "pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012499882s
+STEP: Saw pod success
+Jun  4 17:40:25.978: INFO: Pod "pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac" satisfied condition "success or failure"
+Jun  4 17:40:25.984: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac container test-container: 
+STEP: delete the pod
+Jun  4 17:40:26.103: INFO: Waiting for pod pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac to disappear
+Jun  4 17:40:26.112: INFO: Pod pod-d4ef3cd1-86ef-11e9-a2b6-96b18e3e6fac no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:40:26.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "emptydir-4709" for this suite.
+Jun  4 17:40:32.155: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:40:32.494: INFO: namespace emptydir-4709 deletion completed in 6.369467006s
+
+• [SLOW TEST:8.607 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:41
+  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:40:32.494: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+STEP: Creating pod pod-subpath-test-configmap-6b2w
+STEP: Creating a pod to test atomic-volume-subpath
+Jun  4 17:40:32.571: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-6b2w" in namespace "subpath-1409" to be "success or failure"
+Jun  4 17:40:32.582: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Pending", Reason="", readiness=false. Elapsed: 11.007736ms
+Jun  4 17:40:34.587: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 2.016213901s
+Jun  4 17:40:36.594: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 4.023045622s
+Jun  4 17:40:38.599: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 6.027997481s
+Jun  4 17:40:40.609: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 8.03786776s
+Jun  4 17:40:42.615: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 10.043994586s
+Jun  4 17:40:44.621: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 12.050349352s
+Jun  4 17:40:46.628: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 14.057296182s
+Jun  4 17:40:48.634: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 16.063380662s
+Jun  4 17:40:50.639: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 18.068659485s
+Jun  4 17:40:52.646: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 20.074884566s
+Jun  4 17:40:54.652: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Running", Reason="", readiness=true. Elapsed: 22.081635961s
+Jun  4 17:40:56.658: INFO: Pod "pod-subpath-test-configmap-6b2w": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.087506333s
+STEP: Saw pod success
+Jun  4 17:40:56.658: INFO: Pod "pod-subpath-test-configmap-6b2w" satisfied condition "success or failure"
+Jun  4 17:40:56.663: INFO: Trying to get logs from node ip-172-31-9-156.eu-central-1.compute.internal pod pod-subpath-test-configmap-6b2w container test-container-subpath-configmap-6b2w: 
+STEP: delete the pod
+Jun  4 17:40:57.171: INFO: Waiting for pod pod-subpath-test-configmap-6b2w to disappear
+Jun  4 17:40:57.176: INFO: Pod pod-subpath-test-configmap-6b2w no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-6b2w
+Jun  4 17:40:57.176: INFO: Deleting pod "pod-subpath-test-configmap-6b2w" in namespace "subpath-1409"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:40:57.181: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "subpath-1409" for this suite.
+Jun  4 17:41:03.271: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:41:03.643: INFO: namespace subpath-1409 deletion completed in 6.452523124s
+
+• [SLOW TEST:31.149 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
+    /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:149
+STEP: Creating a kubernetes client
+Jun  4 17:41:03.643: INFO: >>> kubeConfig: /tmp/kubeconfig-441229521
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+Jun  4 17:41:03.768: INFO: Pod name cleanup-pod: Found 0 pods out of 1
+Jun  4 17:41:08.776: INFO: Pod name cleanup-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+Jun  4 17:41:08.777: INFO: Creating deployment test-cleanup-deployment
+STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+Jun  4 17:41:12.937: INFO: Deployment "test-cleanup-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:deployment-2768,SelfLink:/apis/apps/v1/namespaces/deployment-2768/deployments/test-cleanup-deployment,UID:efbbede9-86ef-11e9-83c6-06284416dbe9,ResourceVersion:37869,Generation:1,CreationTimestamp:2019-06-04 17:41:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 1,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-06-04 17:41:08 +0000 UTC 2019-06-04 17:41:08 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-06-04 17:41:10 +0000 UTC 2019-06-04 17:41:08 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-cleanup-deployment-55cbfbc8f5" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+Jun  4 17:41:12.942: INFO: New ReplicaSet "test-cleanup-deployment-55cbfbc8f5" of Deployment "test-cleanup-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55cbfbc8f5,GenerateName:,Namespace:deployment-2768,SelfLink:/apis/apps/v1/namespaces/deployment-2768/replicasets/test-cleanup-deployment-55cbfbc8f5,UID:efc00d45-86ef-11e9-83c6-06284416dbe9,ResourceVersion:37858,Generation:1,CreationTimestamp:2019-06-04 17:41:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment efbbede9-86ef-11e9-83c6-06284416dbe9 0xc0024e70d7 0xc0024e70d8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+Jun  4 17:41:12.947: INFO: Pod "test-cleanup-deployment-55cbfbc8f5-dnvqf" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-55cbfbc8f5-dnvqf,GenerateName:test-cleanup-deployment-55cbfbc8f5-,Namespace:deployment-2768,SelfLink:/api/v1/namespaces/deployment-2768/pods/test-cleanup-deployment-55cbfbc8f5-dnvqf,UID:efc16bd0-86ef-11e9-83c6-06284416dbe9,ResourceVersion:37857,Generation:0,CreationTimestamp:2019-06-04 17:41:08 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 55cbfbc8f5,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-deployment-55cbfbc8f5 efc00d45-86ef-11e9-83c6-06284416dbe9 0xc0024e76f7 0xc0024e76f8}],Finalizers:[],ClusterName:,Initializers:nil,ManagedFields:[],},Spec:PodSpec{Volumes:[{default-token-vx9nv {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-vx9nv,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-vx9nv true /var/run/secrets/kubernetes.io/serviceaccount   }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-172-31-9-156.eu-central-1.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists  NoExecute 0xc0024e7790} {node.kubernetes.io/unreachable Exists  NoExecute 0xc0024e77b0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:41:08 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:41:10 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:41:10 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-04 17:41:08 +0000 UTC  }],Message:,Reason:,HostIP:172.31.9.156,PodIP:172.25.2.232,StartTime:2019-06-04 17:41:08 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-06-04 17:41:10 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://45acd28c4edc5db4a86a11322189e902f53971761ffaec1784a746819898e486}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:150
+Jun  4 17:41:12.947: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "deployment-2768" for this suite.
+Jun  4 17:41:18.968: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+Jun  4 17:41:20.023: INFO: namespace deployment-2768 deletion completed in 7.07099702s
+
+• [SLOW TEST:16.380 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should delete old replica sets [Conformance]
+  /workspace/anago-v1.14.1-beta.0.44+b7394102d6ef77/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:692
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSJun  4 17:41:20.024: INFO: Running AfterSuite actions on all nodes
+Jun  4 17:41:20.024: INFO: Running AfterSuite actions on node 1
+Jun  4 17:41:20.024: INFO: Skipping dumping logs from cluster
+
+Ran 204 of 3584 Specs in 6411.431 seconds
+SUCCESS! -- 204 Passed | 0 Failed | 0 Pending | 3380 Skipped PASS
+
+Ginkgo ran 1 suite in 1h46m52.789819352s
+Test Suite Passed
diff --git a/v1.14/kubermatic/junit_01.xml b/v1.14/kubermatic/junit_01.xml
new file mode 100644
index 0000000000..9776f5b80b
--- /dev/null
+++ b/v1.14/kubermatic/junit_01.xml
@@ -0,0 +1,10347 @@
+<!-- junit_01.xml: 10,347 lines of JUnit XML results for this conformance run; element content was lost in extraction and only indentation whitespace survived, so it is not reproduced here -->
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
+          
+      
+      
\ No newline at end of file