From 79ecbee66d929b34cd86bef2789480d8a4272cbc Mon Sep 17 00:00:00 2001
From: Alexander Chadin
Date: Wed, 8 May 2019 18:44:20 +0300
Subject: [PATCH] Conformance results for v1.13/mcs (#557)

---
 v1.13/mcs/PRODUCT.yaml | 8 +
 v1.13/mcs/README.md | 32 +
 v1.13/mcs/e2e.log | 10278 +++++++++++++++++++++++++++++++++++++++
 v1.13/mcs/junit_01.xml | 6086 +++++++++++++++++++++++
 4 files changed, 16404 insertions(+)
 create mode 100644 v1.13/mcs/PRODUCT.yaml
 create mode 100644 v1.13/mcs/README.md
 create mode 100644 v1.13/mcs/e2e.log
 create mode 100644 v1.13/mcs/junit_01.xml

diff --git a/v1.13/mcs/PRODUCT.yaml b/v1.13/mcs/PRODUCT.yaml
new file mode 100644
index 0000000000..8a827eb937
--- /dev/null
+++ b/v1.13/mcs/PRODUCT.yaml
@@ -0,0 +1,8 @@
+vendor: Mail.Ru Cloud Solutions
+name: Mail.Ru Cloud Containers
+version: v1.13.3-mcs.1
+website_url: https://mcs.mail.ru/en/
+documentation_url: https://mcs.mail.ru/help/containers
+product_logo_url: https://hb.bizmrg.com/mcs-static/___files/logo/mcs_logo_v1_black_text.svg
+type: hosted
+description: Mail.Ru Cloud Containers is a managed service for Kubernetes that makes it easy for you to run Kubernetes clusters on any OpenStack-compatible cloud. Mail.Ru Cloud Solutions provides hybrid cloud-based services that enable businesses to run their workloads in a public or private cloud.
diff --git a/v1.13/mcs/README.md b/v1.13/mcs/README.md
new file mode 100644
index 0000000000..4dc3c4e0ab
--- /dev/null
+++ b/v1.13/mcs/README.md
@@ -0,0 +1,32 @@
+## To Reproduce:
+
+Note: to reproduce these results, you need a Mail.Ru Cloud Solutions account. You can create one by signing up at https://mcs.mail.ru/app/en/signup/
+
+### Create cluster
+
+Log in to Mail.Ru Cloud Solutions and activate your account at https://mcs.mail.ru/app/en/
+
+This guide explains how to create a single cluster:
+
+https://mcs.mail.ru/help/kubernetes/clusterfast
+
+### Get Credentials
+
+Open the firewall from your IP address to the cluster so that kubectl can reach it:
+
+Head to Network Settings, add a new rule, and enter your IP address as the source, the Kubernetes servers as the destination, TCP as the protocol, and 6443 as the port.
+
+Go to https://mcs.mail.ru/app/en/services/containers/list/, then select your cluster and download the kubeconfig archive.
+
+After you unpack the archive, set the KUBECONFIG environment variable and check access to your cluster:
+
+```bash
+export KUBECONFIG=~/Downloads//config
+kubectl get nodes
+```
+
+### Run the tests
+
+You can use the Sonobuoy Scanner to run the tests:
+
+https://scanner.heptio.com/
diff --git a/v1.13/mcs/e2e.log b/v1.13/mcs/e2e.log
new file mode 100644
index 0000000000..303a958300
--- /dev/null
+++ b/v1.13/mcs/e2e.log
@@ -0,0 +1,10278 @@
+I0506 07:27:21.270688 14 test_context.go:359] Using a temporary kubeconfig file from in-cluster config : /tmp/kubeconfig-307990706
+I0506 07:27:21.270785 14 e2e.go:224] Starting e2e run "62469e8e-6fd0-11e9-a235-ba138c0d9035" on Ginkgo node 1
+Running Suite: Kubernetes e2e suite
+===================================
+Random Seed: 1557127640 - Will randomize all specs
+Will run 201 of 2161 specs
+
+May 6 07:27:21.474: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+May 6 07:27:21.477: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
+May 6 07:27:21.510: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
+May 6 07:27:21.551: INFO: 6 / 6 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
+May 6 07:27:21.551: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready.
+May 6 07:27:21.551: INFO: Waiting up to 5m0s for all daemonsets in namespace 'kube-system' to start +May 6 07:27:21.563: INFO: 2 / 2 pods ready in namespace 'kube-system' in daemonset 'calico-node' (0 seconds elapsed) +May 6 07:27:21.563: INFO: 1 / 1 pods ready in namespace 'kube-system' in daemonset 'openstack-cloud-controller-manager' (0 seconds elapsed) +May 6 07:27:21.564: INFO: e2e test version: v1.13.3 +May 6 07:27:21.566: INFO: kube-apiserver version: v1.13.3 +SSS +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:27:21.566: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-probe +May 6 07:27:21.716: INFO: No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled. +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-fbk48 +May 6 07:27:29.745: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-fbk48 +STEP: checking the pod's current state and verifying that restartCount is present +May 6 07:27:29.749: INFO: Initial restart count of pod liveness-http is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:31:30.404: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-fbk48" for this suite. 
+May 6 07:31:36.457: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:31:36.683: INFO: namespace: e2e-tests-container-probe-fbk48, resource: bindings, ignored listing per whitelist +May 6 07:31:36.716: INFO: namespace e2e-tests-container-probe-fbk48 deletion completed in 6.287243634s + +• [SLOW TEST:255.149 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:31:36.716: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. +[It] should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +May 6 07:31:50.902: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:31:50.907: INFO: Pod pod-with-prestop-http-hook still exists +May 6 07:31:52.907: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:31:52.912: INFO: Pod pod-with-prestop-http-hook still exists +May 6 07:31:54.908: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:31:54.914: INFO: Pod pod-with-prestop-http-hook still exists +May 6 07:31:56.907: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:31:56.916: INFO: Pod pod-with-prestop-http-hook still exists +May 6 07:31:58.907: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:31:58.916: INFO: Pod pod-with-prestop-http-hook still exists +May 6 07:32:00.908: INFO: Waiting for pod pod-with-prestop-http-hook to disappear +May 6 07:32:00.914: INFO: Pod pod-with-prestop-http-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:32:01.015: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-br956" for this suite. 
+May 6 07:32:23.038: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:32:23.142: INFO: namespace: e2e-tests-container-lifecycle-hook-br956, resource: bindings, ignored listing per whitelist +May 6 07:32:23.200: INFO: namespace e2e-tests-container-lifecycle-hook-br956 deletion completed in 22.178228787s + +• [SLOW TEST:46.484 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute prestop http hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with secret pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:32:23.200: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with secret pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-secret-hpnf +STEP: Creating a pod to test atomic-volume-subpath +May 6 07:32:23.314: INFO: Waiting up to 5m0s for pod "pod-subpath-test-secret-hpnf" in namespace "e2e-tests-subpath-n4hzx" to be "success or failure" +May 6 07:32:23.330: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Pending", Reason="", readiness=false. Elapsed: 14.965528ms +May 6 07:32:25.337: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.02252069s +May 6 07:32:27.342: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Pending", Reason="", readiness=false. Elapsed: 4.027262963s +May 6 07:32:29.349: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Pending", Reason="", readiness=false. Elapsed: 6.034305175s +May 6 07:32:31.353: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Pending", Reason="", readiness=false. Elapsed: 8.038361672s +May 6 07:32:33.357: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 10.042561387s +May 6 07:32:35.361: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 12.046800193s +May 6 07:32:37.368: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. 
Elapsed: 14.053772367s +May 6 07:32:39.375: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 16.060214251s +May 6 07:32:41.379: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 18.06403952s +May 6 07:32:43.396: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 20.081473986s +May 6 07:32:45.402: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 22.087173868s +May 6 07:32:47.406: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 24.091916199s +May 6 07:32:49.412: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 26.097766011s +May 6 07:32:51.416: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Running", Reason="", readiness=false. Elapsed: 28.101406641s +May 6 07:32:53.420: INFO: Pod "pod-subpath-test-secret-hpnf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 30.105397409s +STEP: Saw pod success +May 6 07:32:53.420: INFO: Pod "pod-subpath-test-secret-hpnf" satisfied condition "success or failure" +May 6 07:32:53.423: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-subpath-test-secret-hpnf container test-container-subpath-secret-hpnf: +STEP: delete the pod +May 6 07:32:53.449: INFO: Waiting for pod pod-subpath-test-secret-hpnf to disappear +May 6 07:32:53.451: INFO: Pod pod-subpath-test-secret-hpnf no longer exists +STEP: Deleting pod pod-subpath-test-secret-hpnf +May 6 07:32:53.451: INFO: Deleting pod "pod-subpath-test-secret-hpnf" in namespace "e2e-tests-subpath-n4hzx" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:32:53.454: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-n4hzx" for this suite. 
+May 6 07:32:59.477: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:32:59.509: INFO: namespace: e2e-tests-subpath-n4hzx, resource: bindings, ignored listing per whitelist +May 6 07:32:59.607: INFO: namespace e2e-tests-subpath-n4hzx deletion completed in 6.149100517s + +• [SLOW TEST:36.407 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with secret pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-api-machinery] Garbage collector + should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:32:59.610: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename gc +STEP: Waiting for a default service account to be provisioned in namespace +[It] should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the rc +STEP: delete the rc +STEP: wait for all pods to be garbage collected +STEP: Gathering metrics +W0506 07:33:09.905164 14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled. 
+May 6 07:33:09.905: INFO: For apiserver_request_count: +For apiserver_request_latencies_summary: +For etcd_helper_cache_entry_count: +For etcd_helper_cache_hit_count: +For etcd_helper_cache_miss_count: +For etcd_request_cache_add_latencies_summary: +For etcd_request_cache_get_latencies_summary: +For etcd_request_latencies_summary: +For garbage_collector_attempt_to_delete_queue_latency: +For garbage_collector_attempt_to_delete_work_duration: +For garbage_collector_attempt_to_orphan_queue_latency: +For garbage_collector_attempt_to_orphan_work_duration: +For garbage_collector_dirty_processing_latency_microseconds: +For garbage_collector_event_processing_latency_microseconds: +For garbage_collector_graph_changes_queue_latency: +For garbage_collector_graph_changes_work_duration: +For garbage_collector_orphan_processing_latency_microseconds: +For namespace_queue_latency: +For namespace_queue_latency_sum: +For namespace_queue_latency_count: +For namespace_retries: +For namespace_work_duration: +For namespace_work_duration_sum: +For namespace_work_duration_count: +For function_duration_seconds: +For errors_total: +For evicted_pods_total: + +[AfterEach] [sig-api-machinery] Garbage collector + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:33:09.905: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-gc-z2hbp" for this suite. +May 6 07:33:15.926: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:33:16.017: INFO: namespace: e2e-tests-gc-z2hbp, resource: bindings, ignored listing per whitelist +May 6 07:33:16.047: INFO: namespace e2e-tests-gc-z2hbp deletion completed in 6.138195436s + +• [SLOW TEST:16.437 seconds] +[sig-api-machinery] Garbage collector +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should delete pods created by rc when not orphaning [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[k8s.io] Probing container + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:33:16.047: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-exec in namespace 
e2e-tests-container-probe-v7wlq +May 6 07:33:18.194: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-v7wlq +STEP: checking the pod's current state and verifying that restartCount is present +May 6 07:33:18.199: INFO: Initial restart count of pod liveness-exec is 0 +May 6 07:34:12.337: INFO: Restart count of pod e2e-tests-container-probe-v7wlq/liveness-exec is now 1 (54.137828001s elapsed) +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:34:12.360: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-v7wlq" for this suite. +May 6 07:34:18.379: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:34:18.428: INFO: namespace: e2e-tests-container-probe-v7wlq, resource: bindings, ignored listing per whitelist +May 6 07:34:18.505: INFO: namespace e2e-tests-container-probe-v7wlq deletion completed in 6.140281036s + +• [SLOW TEST:62.458 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Downward API volume + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:34:18.507: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 07:34:18.644: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-s75wx" to be "success or failure" +May 6 07:34:18.652: INFO: Pod "downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.327608ms +May 6 07:34:20.657: INFO: Pod "downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.012499662s +May 6 07:34:22.671: INFO: Pod "downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.027105466s +STEP: Saw pod success +May 6 07:34:22.672: INFO: Pod "downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:34:22.675: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 07:34:22.704: INFO: Waiting for pod downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035 to disappear +May 6 07:34:22.707: INFO: Pod downwardapi-volume-5b8d6ce0-6fd1-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:34:22.707: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-s75wx" for this suite. +May 6 07:34:28.721: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:34:28.859: INFO: namespace: e2e-tests-downward-api-s75wx, resource: bindings, ignored listing per whitelist +May 6 07:34:28.903: INFO: namespace e2e-tests-downward-api-s75wx deletion completed in 6.192958164s + +• [SLOW TEST:10.396 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:34:28.903: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-61bd344b-6fd1-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:34:29.027: INFO: Waiting up to 5m0s for pod "pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-v72nc" to be "success or failure" +May 6 07:34:29.032: INFO: Pod "pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.670103ms +May 6 07:34:31.036: INFO: Pod "pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008785185s +May 6 07:34:33.042: INFO: Pod "pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014571467s +STEP: Saw pod success +May 6 07:34:33.042: INFO: Pod "pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:34:33.048: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035 container secret-volume-test: +STEP: delete the pod +May 6 07:34:33.070: INFO: Waiting for pod pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035 to disappear +May 6 07:34:33.073: INFO: Pod pod-secrets-61be1163-6fd1-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:34:33.073: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-v72nc" for this suite. +May 6 07:34:39.094: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:34:39.147: INFO: namespace: e2e-tests-secrets-v72nc, resource: bindings, ignored listing per whitelist +May 6 07:34:39.218: INFO: namespace e2e-tests-secrets-v72nc deletion completed in 6.136633333s + +• [SLOW TEST:10.315 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-auth] ServiceAccounts + should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:34:39.218: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename svcaccounts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: getting the auto-created API token +May 6 07:34:39.851: INFO: created pod pod-service-account-defaultsa +May 6 07:34:39.851: INFO: pod pod-service-account-defaultsa service account token volume mount: true +May 6 07:34:39.860: INFO: created pod pod-service-account-mountsa +May 6 07:34:39.860: INFO: pod pod-service-account-mountsa service account token volume mount: true +May 6 07:34:39.877: INFO: created pod pod-service-account-nomountsa +May 6 07:34:39.877: INFO: pod pod-service-account-nomountsa service account token volume mount: false +May 6 07:34:39.889: INFO: created pod pod-service-account-defaultsa-mountspec +May 6 07:34:39.889: INFO: pod pod-service-account-defaultsa-mountspec service account token volume mount: true +May 6 07:34:39.900: INFO: created pod pod-service-account-mountsa-mountspec +May 6 07:34:39.900: INFO: pod pod-service-account-mountsa-mountspec service account token 
volume mount: true +May 6 07:34:39.908: INFO: created pod pod-service-account-nomountsa-mountspec +May 6 07:34:39.908: INFO: pod pod-service-account-nomountsa-mountspec service account token volume mount: true +May 6 07:34:39.917: INFO: created pod pod-service-account-defaultsa-nomountspec +May 6 07:34:39.917: INFO: pod pod-service-account-defaultsa-nomountspec service account token volume mount: false +May 6 07:34:39.929: INFO: created pod pod-service-account-mountsa-nomountspec +May 6 07:34:39.930: INFO: pod pod-service-account-mountsa-nomountspec service account token volume mount: false +May 6 07:34:39.937: INFO: created pod pod-service-account-nomountsa-nomountspec +May 6 07:34:39.937: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false +[AfterEach] [sig-auth] ServiceAccounts + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:34:39.937: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-svcaccounts-wjhbg" for this suite. +May 6 07:35:01.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:35:02.022: INFO: namespace: e2e-tests-svcaccounts-wjhbg, resource: bindings, ignored listing per whitelist +May 6 07:35:02.113: INFO: namespace e2e-tests-svcaccounts-wjhbg deletion completed in 22.170133914s + +• [SLOW TEST:22.895 seconds] +[sig-auth] ServiceAccounts +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22 + should allow opting out of API token automount [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:35:02.113: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on node default medium +May 6 07:35:02.311: INFO: Waiting up to 5m0s for pod "pod-75949aee-6fd1-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-8jspl" to be "success or failure" +May 6 07:35:02.320: INFO: Pod "pod-75949aee-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.006677ms +May 6 07:35:04.325: INFO: Pod "pod-75949aee-6fd1-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.013695073s +STEP: Saw pod success +May 6 07:35:04.325: INFO: Pod "pod-75949aee-6fd1-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:35:04.329: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-75949aee-6fd1-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:35:04.357: INFO: Waiting for pod pod-75949aee-6fd1-11e9-a235-ba138c0d9035 to disappear +May 6 07:35:04.360: INFO: Pod pod-75949aee-6fd1-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:35:04.360: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-8jspl" for this suite. +May 6 07:35:10.384: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:35:10.520: INFO: namespace: e2e-tests-emptydir-8jspl, resource: bindings, ignored listing per whitelist +May 6 07:35:10.540: INFO: namespace e2e-tests-emptydir-8jspl deletion completed in 6.175662935s + +• [SLOW TEST:8.427 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSS +------------------------------ +[k8s.io] Pods + should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:35:10.542: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating pod +May 6 07:35:14.672: INFO: Pod pod-hostip-7a8d1e43-6fd1-11e9-a235-ba138c0d9035 has hostIP: 10.0.0.19 +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:35:14.672: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-zd557" for this suite. 
+May 6 07:35:36.690: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:35:36.786: INFO: namespace: e2e-tests-pods-zd557, resource: bindings, ignored listing per whitelist +May 6 07:35:36.835: INFO: namespace e2e-tests-pods-zd557 deletion completed in 22.158309695s + +• [SLOW TEST:26.293 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should get a host IP [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-api-machinery] Watchers + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:35:36.835: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a watch on configmaps with label A +STEP: creating a watch on configmaps with label B +STEP: creating a watch on configmaps with label A or B +STEP: creating a configmap with label A and ensuring the correct watchers observe the notification +May 6 07:35:36.961: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4456,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 6 07:35:36.961: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4456,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: modifying configmap A and ensuring the correct watchers observe the notification +May 6 07:35:46.973: INFO: Got : MODIFIED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4476,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +May 6 07:35:46.974: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4476,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying configmap A again and ensuring the correct watchers observe the notification +May 6 07:35:56.984: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4497,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +May 6 07:35:56.984: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4497,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: deleting configmap A and ensuring the correct watchers observe the notification +May 6 07:36:06.993: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4517,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 
2,},BinaryData:map[string][]byte{},} +May 6 07:36:06.994: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-a,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-a,UID:8a3c48ac-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4517,Generation:0,CreationTimestamp:2019-05-06 07:35:36 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-A,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +STEP: creating a configmap with label B and ensuring the correct watchers observe the notification +May 6 07:36:17.001: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-b,UID:a219e9e2-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4537,Generation:0,CreationTimestamp:2019-05-06 07:36:16 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 6 07:36:17.002: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-b,UID:a219e9e2-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4537,Generation:0,CreationTimestamp:2019-05-06 07:36:16 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +STEP: deleting configmap B and ensuring the correct watchers observe the notification +May 6 07:36:27.009: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-b,UID:a219e9e2-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4558,Generation:0,CreationTimestamp:2019-05-06 07:36:16 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 6 07:36:27.009: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-configmap-b,GenerateName:,Namespace:e2e-tests-watch-drn7x,SelfLink:/api/v1/namespaces/e2e-tests-watch-drn7x/configmaps/e2e-watch-test-configmap-b,UID:a219e9e2-6fd1-11e9-8e1b-fa163ee16beb,ResourceVersion:4558,Generation:0,CreationTimestamp:2019-05-06 07:36:16 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: 
multiple-watchers-B,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:36:37.010: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-drn7x" for this suite. +May 6 07:36:43.035: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:36:43.102: INFO: namespace: e2e-tests-watch-drn7x, resource: bindings, ignored listing per whitelist +May 6 07:36:43.187: INFO: namespace e2e-tests-watch-drn7x deletion completed in 6.171337467s + +• [SLOW TEST:66.352 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should observe add, update, and delete watch notifications on configmaps [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:36:43.188: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-map-b1ce5ec4-6fd1-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 07:36:43.358: INFO: Waiting up to 5m0s for pod "pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-zgqkk" to be "success or failure" +May 6 07:36:43.365: INFO: Pod "pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.60277ms +May 6 07:36:45.369: INFO: Pod "pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.010836394s +STEP: Saw pod success +May 6 07:36:45.369: INFO: Pod "pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:36:45.373: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035 container configmap-volume-test: +STEP: delete the pod +May 6 07:36:45.401: INFO: Waiting for pod pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035 to disappear +May 6 07:36:45.405: INFO: Pod pod-configmaps-b1cef053-6fd1-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:36:45.405: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-zgqkk" for this suite. +May 6 07:36:51.428: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:36:51.458: INFO: namespace: e2e-tests-configmap-zgqkk, resource: bindings, ignored listing per whitelist +May 6 07:36:51.582: INFO: namespace e2e-tests-configmap-zgqkk deletion completed in 6.171546724s + +• [SLOW TEST:8.394 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:36:51.583: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-jh8vj +[It] Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating stateful set ss in namespace e2e-tests-statefulset-jh8vj +STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-jh8vj +May 6 07:36:51.785: INFO: Found 0 stateful pods, waiting for 1 +May 6 07:37:01.790: INFO: Waiting 
for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will not halt with unhealthy stateful pod +May 6 07:37:01.794: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:37:02.127: INFO: stderr: "" +May 6 07:37:02.127: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:37:02.127: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:37:02.131: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +May 6 07:37:12.144: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:37:12.144: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:37:12.168: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:12.168: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:02 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:02 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:12.168: INFO: ss-1 Pending [] +May 6 07:37:12.169: INFO: +May 6 07:37:12.169: INFO: StatefulSet ss has not reached scale 3, at 2 +May 6 07:37:13.180: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.989557182s +May 6 07:37:14.184: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.979058179s +May 6 07:37:15.189: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.974448596s +May 6 07:37:16.193: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.970153114s +May 6 07:37:17.197: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.9655502s +May 6 07:37:18.201: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.961464592s +May 6 07:37:19.206: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.95747995s +May 6 07:37:20.211: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.953043013s +May 6 07:37:21.215: INFO: Verifying statefulset ss doesn't scale past 3 for another 947.951391ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-jh8vj +May 6 07:37:22.221: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:37:22.472: INFO: stderr: "" +May 6 07:37:22.472: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:37:22.472: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:37:22.472: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:37:22.775: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n" +May 6 07:37:22.775: INFO: 
stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:37:22.775: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:37:22.775: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:37:23.052: INFO: stderr: "mv: can't rename '/tmp/index.html': No such file or directory\n" +May 6 07:37:23.052: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:37:23.052: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:37:23.057: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +May 6 07:37:23.057: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +May 6 07:37:23.057: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Scale down will not halt with unhealthy stateful pod +May 6 07:37:23.060: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:37:23.341: INFO: stderr: "" +May 6 07:37:23.341: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:37:23.341: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:37:23.341: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:37:23.633: INFO: stderr: "" +May 6 07:37:23.633: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:37:23.633: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:37:23.633: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-jh8vj ss-2 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:37:23.884: INFO: stderr: "" +May 6 07:37:23.884: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:37:23.884: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:37:23.884: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:37:23.886: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 3 +May 6 07:37:33.894: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:37:33.894: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:37:33.895: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:37:33.909: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:33.909: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: 
[nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:33.910: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:33.910: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:33.910: INFO: +May 6 07:37:33.910: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:34.914: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:34.914: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:34.915: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:34.915: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:34.915: INFO: +May 6 07:37:34.915: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:35.920: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:35.920: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: 
[nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:35.920: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:35.920: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:35.920: INFO: +May 6 07:37:35.920: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:36.926: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:36.926: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:36.926: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:36.926: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:36.926: INFO: +May 6 07:37:36.926: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:37.930: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:37.930: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:37.930: INFO: ss-1 
kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:37.931: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:37.931: INFO: +May 6 07:37:37.931: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:38.936: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:38.936: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:38.936: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:38.936: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:38.936: INFO: +May 6 07:37:38.936: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:39.941: INFO: POD NODE PHASE GRACE CONDITIONS +May 6 07:37:39.941: INFO: ss-0 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:36:51 +0000 UTC }] +May 6 07:37:39.941: INFO: ss-1 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 
0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:23 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:39.941: INFO: ss-2 kubernetes-cluster-2696-minion-0 Running 30s [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:24 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:37:12 +0000 UTC }] +May 6 07:37:39.941: INFO: +May 6 07:37:39.941: INFO: StatefulSet ss has not reached scale 0, at 3 +May 6 07:37:40.946: INFO: Verifying statefulset ss doesn't scale past 0 for another 2.961758033s +May 6 07:37:41.950: INFO: Verifying statefulset ss doesn't scale past 0 for another 1.956867731s +May 6 07:37:42.954: INFO: Verifying statefulset ss doesn't scale past 0 for another 953.311124ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-jh8vj +May 6 07:37:43.958: INFO: Scaling statefulset ss to 0 +May 6 07:37:43.990: INFO: Waiting for statefulset status.replicas updated to 0 +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 6 07:37:43.995: INFO: Deleting all statefulset in ns e2e-tests-statefulset-jh8vj +May 6 07:37:43.998: INFO: Scaling statefulset ss to 0 +May 6 07:37:44.012: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:37:44.016: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:37:44.032: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-jh8vj" for this suite. 
+May 6 07:37:50.061: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:37:50.185: INFO: namespace: e2e-tests-statefulset-jh8vj, resource: bindings, ignored listing per whitelist +May 6 07:37:50.199: INFO: namespace e2e-tests-statefulset-jh8vj deletion completed in 6.160793454s + +• [SLOW TEST:58.616 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Burst scaling should run to completion even with unhealthy pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:37:50.200: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-configmap-245p +STEP: Creating a pod to test atomic-volume-subpath +May 6 07:37:50.362: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-245p" in namespace "e2e-tests-subpath-p84f8" to be "success or failure" +May 6 07:37:50.373: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Pending", Reason="", readiness=false. Elapsed: 11.56433ms +May 6 07:37:52.378: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01653019s +May 6 07:37:54.382: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 4.020839793s +May 6 07:37:56.387: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 6.025188791s +May 6 07:37:58.391: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 8.029658231s +May 6 07:38:00.397: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 10.035442382s +May 6 07:38:02.401: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 12.039615718s +May 6 07:38:04.406: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. 
Elapsed: 14.043921677s +May 6 07:38:06.409: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 16.047636074s +May 6 07:38:08.414: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 18.052762096s +May 6 07:38:10.419: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 20.057644904s +May 6 07:38:12.425: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Running", Reason="", readiness=false. Elapsed: 22.063231534s +May 6 07:38:14.430: INFO: Pod "pod-subpath-test-configmap-245p": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.068745253s +STEP: Saw pod success +May 6 07:38:14.430: INFO: Pod "pod-subpath-test-configmap-245p" satisfied condition "success or failure" +May 6 07:38:14.434: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-subpath-test-configmap-245p container test-container-subpath-configmap-245p: +STEP: delete the pod +May 6 07:38:14.476: INFO: Waiting for pod pod-subpath-test-configmap-245p to disappear +May 6 07:38:14.479: INFO: Pod pod-subpath-test-configmap-245p no longer exists +STEP: Deleting pod pod-subpath-test-configmap-245p +May 6 07:38:14.479: INFO: Deleting pod "pod-subpath-test-configmap-245p" in namespace "e2e-tests-subpath-p84f8" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:38:14.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-p84f8" for this suite. +May 6 07:38:20.503: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:38:20.543: INFO: namespace: e2e-tests-subpath-p84f8, resource: bindings, ignored listing per whitelist +May 6 07:38:20.624: INFO: namespace e2e-tests-subpath-p84f8 deletion completed in 6.137577038s + +• [SLOW TEST:30.424 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with configmap pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:38:20.626: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + 
/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-jm52c +[It] Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Looking for a node to schedule stateful set and pod +STEP: Creating pod with conflicting port in namespace e2e-tests-statefulset-jm52c +STEP: Creating statefulset with conflicting port in namespace e2e-tests-statefulset-jm52c +STEP: Waiting until pod test-pod will start running in namespace e2e-tests-statefulset-jm52c +STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace e2e-tests-statefulset-jm52c +May 6 07:38:22.818: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-jm52c, name: ss-0, uid: ebede2b7-6fd1-11e9-8e1b-fa163ee16beb, status phase: Pending. Waiting for statefulset controller to delete. +May 6 07:38:30.142: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-jm52c, name: ss-0, uid: ebede2b7-6fd1-11e9-8e1b-fa163ee16beb, status phase: Failed. Waiting for statefulset controller to delete. +May 6 07:38:30.152: INFO: Observed stateful pod in namespace: e2e-tests-statefulset-jm52c, name: ss-0, uid: ebede2b7-6fd1-11e9-8e1b-fa163ee16beb, status phase: Failed. Waiting for statefulset controller to delete. +May 6 07:38:30.161: INFO: Observed delete event for stateful pod ss-0 in namespace e2e-tests-statefulset-jm52c +STEP: Removing pod with conflicting port in namespace e2e-tests-statefulset-jm52c +STEP: Waiting when stateful pod ss-0 will be recreated in namespace e2e-tests-statefulset-jm52c and will be in running state +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 6 07:38:42.236: INFO: Deleting all statefulset in ns e2e-tests-statefulset-jm52c +May 6 07:38:42.239: INFO: Scaling statefulset ss to 0 +May 6 07:38:52.254: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:38:52.257: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:38:52.274: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-jm52c" for this suite. 
+May 6 07:38:58.292: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:38:58.346: INFO: namespace: e2e-tests-statefulset-jm52c, resource: bindings, ignored listing per whitelist +May 6 07:38:58.406: INFO: namespace e2e-tests-statefulset-jm52c deletion completed in 6.127780509s + +• [SLOW TEST:37.780 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Should recreate evicted statefulset [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:38:58.407: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. 
+[It] should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: delete the pod with lifecycle hook +May 6 07:39:06.586: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:06.599: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:08.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:08.604: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:10.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:10.605: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:12.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:12.613: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:14.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:14.605: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:16.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:16.604: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:18.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:18.604: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:20.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:20.604: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:22.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:22.604: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:24.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:24.609: INFO: Pod pod-with-prestop-exec-hook still exists +May 6 07:39:26.599: INFO: Waiting for pod pod-with-prestop-exec-hook to disappear +May 6 07:39:26.603: INFO: Pod pod-with-prestop-exec-hook no longer exists +STEP: check prestop hook +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:39:26.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-clgg2" for this suite. 
+May 6 07:39:48.639: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:39:48.669: INFO: namespace: e2e-tests-container-lifecycle-hook-clgg2, resource: bindings, ignored listing per whitelist +May 6 07:39:48.825: INFO: namespace e2e-tests-container-lifecycle-hook-clgg2 deletion completed in 22.204602823s + +• [SLOW TEST:50.419 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute prestop exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Secrets + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:39:48.826: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-206de067-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:39:48.955: INFO: Waiting up to 5m0s for pod "pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-5rsch" to be "success or failure" +May 6 07:39:48.976: INFO: Pod "pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 19.954488ms +May 6 07:39:50.982: INFO: Pod "pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.025887621s +STEP: Saw pod success +May 6 07:39:50.982: INFO: Pod "pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:39:50.985: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035 container secret-volume-test: +STEP: delete the pod +May 6 07:39:51.017: INFO: Waiting for pod pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:39:51.021: INFO: Pod pod-secrets-206e7c92-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:39:51.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-5rsch" for this suite. 
+May 6 07:39:57.052: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:39:57.106: INFO: namespace: e2e-tests-secrets-5rsch, resource: bindings, ignored listing per whitelist +May 6 07:39:57.175: INFO: namespace e2e-tests-secrets-5rsch deletion completed in 6.148697844s + +• [SLOW TEST:8.349 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable in multiple volumes in a pod [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl run rc + should create an rc from an image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:39:57.175: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl run rc + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1298 +[It] should create an rc from an image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +May 6 07:39:57.272: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-2825b' +May 6 07:39:58.110: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n" +May 6 07:39:58.110: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n" +STEP: verifying the rc e2e-test-nginx-rc was created +STEP: verifying the pod controlled by rc e2e-test-nginx-rc was created +STEP: confirm that you can get logs from an rc +May 6 07:39:58.135: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [e2e-test-nginx-rc-vdmpz] +May 6 07:39:58.135: INFO: Waiting up to 5m0s for pod "e2e-test-nginx-rc-vdmpz" in namespace "e2e-tests-kubectl-2825b" to be "running and ready" +May 6 07:39:58.144: INFO: Pod "e2e-test-nginx-rc-vdmpz": Phase="Pending", Reason="", readiness=false. Elapsed: 9.015299ms +May 6 07:40:00.148: INFO: Pod "e2e-test-nginx-rc-vdmpz": Phase="Running", Reason="", readiness=true. Elapsed: 2.012657319s +May 6 07:40:00.148: INFO: Pod "e2e-test-nginx-rc-vdmpz" satisfied condition "running and ready" +May 6 07:40:00.148: INFO: Wanted all 1 pods to be running and ready. Result: true. 
Pods: [e2e-test-nginx-rc-vdmpz] +May 6 07:40:00.148: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 logs rc/e2e-test-nginx-rc --namespace=e2e-tests-kubectl-2825b' +May 6 07:40:00.306: INFO: stderr: "" +May 6 07:40:00.306: INFO: stdout: "" +[AfterEach] [k8s.io] Kubectl run rc + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1303 +May 6 07:40:00.307: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-2825b' +May 6 07:40:00.442: INFO: stderr: "" +May 6 07:40:00.442: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:40:00.442: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-2825b" for this suite. +May 6 07:40:22.514: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:40:22.676: INFO: namespace: e2e-tests-kubectl-2825b, resource: bindings, ignored listing per whitelist +May 6 07:40:22.681: INFO: namespace e2e-tests-kubectl-2825b deletion completed in 22.194817381s + +• [SLOW TEST:25.506 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl run rc + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create an rc from an image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command in a pod + should print the output to logs [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:40:22.683: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[It] should print the output to logs [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:40:24.858: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-5kv5b" for this suite. 
+May 6 07:41:14.880: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:41:14.969: INFO: namespace: e2e-tests-kubelet-test-5kv5b, resource: bindings, ignored listing per whitelist +May 6 07:41:15.010: INFO: namespace e2e-tests-kubelet-test-5kv5b deletion completed in 50.14733224s + +• [SLOW TEST:52.328 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox command in a pod + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:40 + should print the output to logs [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:41:15.012: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +May 6 07:41:15.110: INFO: Waiting up to 5m0s for pod "downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-87mdf" to be "success or failure" +May 6 07:41:15.114: INFO: Pod "downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.124711ms +May 6 07:41:17.120: INFO: Pod "downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.00975464s +May 6 07:41:19.126: INFO: Pod "downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.016108696s +STEP: Saw pod success +May 6 07:41:19.126: INFO: Pod "downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:41:19.130: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035 container dapi-container: +STEP: delete the pod +May 6 07:41:19.160: INFO: Waiting for pod downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:41:19.165: INFO: Pod downward-api-53c9861f-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:41:19.165: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-87mdf" for this suite. +May 6 07:41:25.182: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:41:25.246: INFO: namespace: e2e-tests-downward-api-87mdf, resource: bindings, ignored listing per whitelist +May 6 07:41:25.305: INFO: namespace e2e-tests-downward-api-87mdf deletion completed in 6.136049497s + +• [SLOW TEST:10.293 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Secrets + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:41:25.306: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-59f5b3d1-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:41:25.544: INFO: Waiting up to 5m0s for pod "pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-9rbdk" to be "success or failure" +May 6 07:41:25.562: INFO: Pod "pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 17.840878ms +May 6 07:41:27.569: INFO: Pod "pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.024233662s +STEP: Saw pod success +May 6 07:41:27.569: INFO: Pod "pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:41:27.572: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035 container secret-volume-test: +STEP: delete the pod +May 6 07:41:27.633: INFO: Waiting for pod pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:41:27.637: INFO: Pod pod-secrets-5a002967-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:41:27.637: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-9rbdk" for this suite. +May 6 07:41:33.676: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:41:33.706: INFO: namespace: e2e-tests-secrets-9rbdk, resource: bindings, ignored listing per whitelist +May 6 07:41:33.808: INFO: namespace e2e-tests-secrets-9rbdk deletion completed in 6.151689826s +STEP: Destroying namespace "e2e-tests-secret-namespace-fkx8z" for this suite. +May 6 07:41:39.824: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:41:39.944: INFO: namespace: e2e-tests-secret-namespace-fkx8z, resource: bindings, ignored listing per whitelist +May 6 07:41:39.948: INFO: namespace e2e-tests-secret-namespace-fkx8z deletion completed in 6.139680896s + +• [SLOW TEST:14.642 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:41:39.948: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name cm-test-opt-del-62afa2e0-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating configMap with name cm-test-opt-upd-62afa421-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-62afa2e0-6fd2-11e9-a235-ba138c0d9035 +STEP: Updating configmap cm-test-opt-upd-62afa421-6fd2-11e9-a235-ba138c0d9035 
+STEP: Creating configMap with name cm-test-opt-create-62afa481-6fd2-11e9-a235-ba138c0d9035 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:42:50.689: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-59wk5" for this suite. +May 6 07:43:12.704: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:43:12.736: INFO: namespace: e2e-tests-projected-59wk5, resource: bindings, ignored listing per whitelist +May 6 07:43:12.816: INFO: namespace e2e-tests-projected-59wk5 deletion completed in 22.123600633s + +• [SLOW TEST:92.868 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[k8s.io] [sig-node] Events + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] [sig-node] Events + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:43:12.817: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename events +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: retrieving the pod +May 6 07:43:18.953: INFO: &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:send-events-9a030a7c-6fd2-11e9-a235-ba138c0d9035,GenerateName:,Namespace:e2e-tests-events-6qddt,SelfLink:/api/v1/namespaces/e2e-tests-events-6qddt/pods/send-events-9a030a7c-6fd2-11e9-a235-ba138c0d9035,UID:9a03eaa3-6fd2-11e9-8e1b-fa163ee16beb,ResourceVersion:6054,Generation:0,CreationTimestamp:2019-05-06 07:43:12 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: foo,time: 921673567,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-jgmqj {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-jgmqj,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{p gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 [] [] [{ 0 80 TCP }] [] [] {map[] map[]} [{default-token-jgmqj true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false 
false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:43:12 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:43:17 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:43:17 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 07:43:12 +0000 UTC }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.101,StartTime:2019-05-06 07:43:12 +0000 UTC,ContainerStatuses:[{p {nil ContainerStateRunning{StartedAt:2019-05-06 07:43:17 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1 docker-pullable://gcr.io/kubernetes-e2e-test-images/serve-hostname@sha256:bab70473a6d8ef65a22625dc9a1b0f0452e811530fdbe77e4408523460177ff1 docker://a3bddc06ce4f8a8c5e6bbdcd2eb70760809e37b68141eb4d1697fbf8f0091191}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} + +STEP: checking for scheduler event about the pod +May 6 07:43:20.958: INFO: Saw scheduler event for our pod. +STEP: checking for kubelet event about the pod +May 6 07:43:22.963: INFO: Saw kubelet event for our pod. +STEP: deleting the pod +[AfterEach] [k8s.io] [sig-node] Events + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:43:22.969: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-events-6qddt" for this suite. 
+May 6 07:44:00.993: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:44:01.057: INFO: namespace: e2e-tests-events-6qddt, resource: bindings, ignored listing per whitelist +May 6 07:44:01.122: INFO: namespace e2e-tests-events-6qddt deletion completed in 38.14835605s + +• [SLOW TEST:48.305 seconds] +[k8s.io] [sig-node] Events +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be sent by kubelets and the scheduler about pods scheduling and running [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-network] DNS + should provide DNS for services [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] DNS + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:44:01.125: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename dns +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide DNS for services [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a test headless service +STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-bdwgt;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/wheezy_udp@_http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > 
/results/wheezy_tcp@_http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-bdwgt.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 214.27.254.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.254.27.214_udp@PTR;check="$$(dig +tcp +noall +answer +search 214.27.254.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.254.27.214_tcp@PTR;sleep 1; done + +STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service;check="$$(dig +tcp +noall +answer +search dns-test-service A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-bdwgt;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt;check="$$(dig +notcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search dns-test-service.e2e-tests-dns-bdwgt.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc;check="$$(dig +notcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_udp@_http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc;check="$$(dig +tcp +noall +answer +search _http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc SRV)" && test -n "$$check" && echo OK > /results/jessie_tcp@_http._tcp.test-service-2.e2e-tests-dns-bdwgt.svc;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-bdwgt.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;check="$$(dig +notcp +noall +answer +search 214.27.254.10.in-addr.arpa. PTR)" && test -n "$$check" && echo OK > /results/10.254.27.214_udp@PTR;check="$$(dig +tcp +noall +answer +search 214.27.254.10.in-addr.arpa. 
PTR)" && test -n "$$check" && echo OK > /results/10.254.27.214_tcp@PTR;sleep 1; done + +STEP: creating a pod to probe DNS +STEP: submitting the pod to kubernetes +STEP: retrieving the pod +STEP: looking for the results for each expected name from probers +May 6 07:44:21.355: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.362: INFO: Unable to read wheezy_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.372: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.383: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.388: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.392: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.428: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.433: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.438: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.445: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.469: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.476: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.482: INFO: Unable to read 
jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.489: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:21.522: INFO: Lookups using e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-bdwgt jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc] + +May 6 07:44:26.537: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.546: INFO: Unable to read wheezy_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.561: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.574: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.580: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.584: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.621: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.626: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.630: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt from pod 
e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.634: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.639: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.644: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.648: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.658: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:26.696: INFO: Lookups using e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-bdwgt jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc] + +May 6 07:44:31.529: INFO: Unable to read wheezy_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.533: INFO: Unable to read wheezy_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.545: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.557: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.562: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod 
e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.566: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.597: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.602: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.607: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.611: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.616: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.621: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.625: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.631: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:31.666: INFO: Lookups using e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-bdwgt jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc] + +May 6 07:44:36.566: INFO: Unable to read wheezy_udp@dns-test-service from pod 
e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.573: INFO: Unable to read wheezy_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.589: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.605: INFO: Unable to read wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.610: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.615: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.688: INFO: Unable to read jessie_udp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.701: INFO: Unable to read jessie_tcp@dns-test-service from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.709: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.731: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.741: INFO: Unable to read jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.748: INFO: Unable to read jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.753: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.759: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc from pod 
e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035: the server could not find the requested resource (get pods dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035) +May 6 07:44:36.805: INFO: Lookups using e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035 failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt wheezy_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc wheezy_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.e2e-tests-dns-bdwgt jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt jessie_udp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@dns-test-service.e2e-tests-dns-bdwgt.svc jessie_udp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc jessie_tcp@_http._tcp.dns-test-service.e2e-tests-dns-bdwgt.svc] + +May 6 07:44:41.699: INFO: DNS probes using e2e-tests-dns-bdwgt/dns-test-b6db68d1-6fd2-11e9-a235-ba138c0d9035 succeeded + +STEP: deleting the pod +STEP: deleting the test service +STEP: deleting the test headless service +[AfterEach] [sig-network] DNS + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:44:41.755: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-dns-bdwgt" for this suite. +May 6 07:44:47.776: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:44:47.868: INFO: namespace: e2e-tests-dns-bdwgt, resource: bindings, ignored listing per whitelist +May 6 07:44:47.912: INFO: namespace e2e-tests-dns-bdwgt deletion completed in 6.150144182s + +• [SLOW TEST:46.787 seconds] +[sig-network] DNS +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide DNS for services [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:44:47.915: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir volume type on node default medium +May 6 07:44:48.069: INFO: Waiting up to 5m0s for pod "pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-ltsl4" to be "success or failure" +May 6 07:44:48.078: INFO: Pod 
"pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.732697ms +May 6 07:44:50.083: INFO: Pod "pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013506118s +STEP: Saw pod success +May 6 07:44:50.083: INFO: Pod "pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:44:50.086: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:44:50.127: INFO: Waiting for pod pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:44:50.131: INFO: Pod pod-d2b857fc-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:44:50.131: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-ltsl4" for this suite. +May 6 07:44:56.169: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:44:56.292: INFO: namespace: e2e-tests-emptydir-ltsl4, resource: bindings, ignored listing per whitelist +May 6 07:44:56.338: INFO: namespace e2e-tests-emptydir-ltsl4 deletion completed in 6.201544344s + +• [SLOW TEST:8.423 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + volume on default medium should have the correct mode [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:44:56.338: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-map-d7b7c850-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 07:44:56.458: INFO: Waiting up to 5m0s for pod "pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-5z944" to be "success or failure" +May 6 07:44:56.476: INFO: Pod "pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 18.219221ms +May 6 07:44:58.481: INFO: Pod "pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.023295183s +STEP: Saw pod success +May 6 07:44:58.481: INFO: Pod "pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:44:58.484: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035 container configmap-volume-test: +STEP: delete the pod +May 6 07:44:58.514: INFO: Waiting for pod pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:44:58.517: INFO: Pod pod-configmaps-d7b870e3-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:44:58.518: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-5z944" for this suite. +May 6 07:45:04.543: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:45:04.627: INFO: namespace: e2e-tests-configmap-5z944, resource: bindings, ignored listing per whitelist +May 6 07:45:04.692: INFO: namespace e2e-tests-configmap-5z944 deletion completed in 6.168959213s + +• [SLOW TEST:8.355 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Variable Expansion + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:45:04.694: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename var-expansion +STEP: Waiting for a default service account to be provisioned in namespace +[It] should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test substitution in container's args +May 6 07:45:04.824: INFO: Waiting up to 5m0s for pod "var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-var-expansion-rvvrd" to be "success or failure" +May 6 07:45:04.831: INFO: Pod "var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.511696ms +May 6 07:45:06.839: INFO: Pod "var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.014722903s +STEP: Saw pod success +May 6 07:45:06.839: INFO: Pod "var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:45:06.845: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035 container dapi-container: +STEP: delete the pod +May 6 07:45:06.878: INFO: Waiting for pod var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:45:06.891: INFO: Pod var-expansion-dcb4bb1f-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [k8s.io] Variable Expansion + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:45:06.891: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-var-expansion-rvvrd" for this suite. +May 6 07:45:12.912: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:45:12.978: INFO: namespace: e2e-tests-var-expansion-rvvrd, resource: bindings, ignored listing per whitelist +May 6 07:45:13.046: INFO: namespace e2e-tests-var-expansion-rvvrd deletion completed in 6.150489895s + +• [SLOW TEST:8.353 seconds] +[k8s.io] Variable Expansion +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow substituting values in a container's args [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-apps] ReplicaSet + should adopt matching pods on creation and release no longer matching pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicaSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:45:13.048: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename replicaset +STEP: Waiting for a default service account to be provisioned in namespace +[It] should adopt matching pods on creation and release no longer matching pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Given a Pod with a 'name' label pod-adoption-release is created +STEP: When a replicaset with a matching selector is created +STEP: Then the orphan pod is adopted +STEP: When the matched label of one of its pods change +May 6 07:45:21.223: INFO: Pod name pod-adoption-release: Found 1 pods out of 1 +STEP: Then the pod is released +[AfterEach] [sig-apps] ReplicaSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:45:21.258: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replicaset-w7gsz" for this suite. 
+May 6 07:45:43.281: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:45:43.311: INFO: namespace: e2e-tests-replicaset-w7gsz, resource: bindings, ignored listing per whitelist +May 6 07:45:43.416: INFO: namespace e2e-tests-replicaset-w7gsz deletion completed in 22.147284527s + +• [SLOW TEST:30.368 seconds] +[sig-apps] ReplicaSet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should adopt matching pods on creation and release no longer matching pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:45:43.418: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on node default medium +May 6 07:45:43.598: INFO: Waiting up to 5m0s for pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-tx824" to be "success or failure" +May 6 07:45:43.602: INFO: Pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 3.93971ms +May 6 07:45:45.613: INFO: Pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014698056s +May 6 07:45:47.618: INFO: Pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.019234693s +May 6 07:45:49.622: INFO: Pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.024082132s +STEP: Saw pod success +May 6 07:45:49.623: INFO: Pod "pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:45:49.626: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:45:49.664: INFO: Waiting for pod pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:45:49.669: INFO: Pod pod-f3d124bb-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:45:49.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-tx824" for this suite. 
+May 6 07:45:55.693: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:45:55.782: INFO: namespace: e2e-tests-emptydir-tx824, resource: bindings, ignored listing per whitelist +May 6 07:45:55.826: INFO: namespace e2e-tests-emptydir-tx824 deletion completed in 6.152654714s + +• [SLOW TEST:12.409 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0666,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:45:55.827: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name projected-secret-test-map-fb2db736-6fd2-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:45:55.958: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-j5j84" to be "success or failure" +May 6 07:45:55.969: INFO: Pod "pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 10.033738ms +May 6 07:45:57.977: INFO: Pod "pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01843909s +STEP: Saw pod success +May 6 07:45:57.977: INFO: Pod "pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:45:57.980: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035 container projected-secret-volume-test: +STEP: delete the pod +May 6 07:45:58.013: INFO: Waiting for pod pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035 to disappear +May 6 07:45:58.016: INFO: Pod pod-projected-secrets-fb2e7ea3-6fd2-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:45:58.016: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-j5j84" for this suite. 
+May 6 07:46:04.036: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:46:04.126: INFO: namespace: e2e-tests-projected-j5j84, resource: bindings, ignored listing per whitelist +May 6 07:46:04.193: INFO: namespace e2e-tests-projected-j5j84 deletion completed in 6.172029354s + +• [SLOW TEST:8.366 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Proxy server + should support proxy with --port 0 [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:46:04.196: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should support proxy with --port 0 [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: starting the proxy server +May 6 07:46:04.349: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-307990706 proxy -p 0 --disable-filter' +STEP: curling proxy /api/ output +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:46:04.445: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-r4lvw" for this suite. 
+May 6 07:46:10.462: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:46:10.500: INFO: namespace: e2e-tests-kubectl-r4lvw, resource: bindings, ignored listing per whitelist +May 6 07:46:10.579: INFO: namespace e2e-tests-kubectl-r4lvw deletion completed in 6.128826924s + +• [SLOW TEST:6.383 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Proxy server + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should support proxy with --port 0 [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:46:10.579: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 07:46:10.733: INFO: Creating simple daemon set daemon-set +STEP: Check that daemon pods launch on every node of the cluster. 
+May 6 07:46:10.752: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:10.756: INFO: Number of nodes with available pods: 0 +May 6 07:46:10.756: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:11.760: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:11.764: INFO: Number of nodes with available pods: 0 +May 6 07:46:11.764: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:12.768: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:12.773: INFO: Number of nodes with available pods: 0 +May 6 07:46:12.773: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:13.760: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:13.765: INFO: Number of nodes with available pods: 0 +May 6 07:46:13.765: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:14.760: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:14.763: INFO: Number of nodes with available pods: 1 +May 6 07:46:14.763: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update daemon pods image. +STEP: Check that daemon pods images are updated. +May 6 07:46:14.807: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:14.820: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:15.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:15.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:16.830: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. 
+May 6 07:46:16.834: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:17.835: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:17.841: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:18.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:18.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:19.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:19.828: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:20.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:20.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:21.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:21.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:22.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:22.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:23.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:23.832: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:24.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. 
+May 6 07:46:24.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:25.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:25.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:26.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:26.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:27.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:27.831: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:28.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:28.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:29.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:29.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:30.827: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:30.832: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:31.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:31.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:32.828: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. 
+May 6 07:46:32.833: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:33.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:33.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:34.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:34.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:35.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:35.833: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:36.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:36.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:37.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:37.831: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:38.827: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:38.831: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:39.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:39.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:40.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. 
+May 6 07:46:40.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:41.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:41.838: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:42.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:42.830: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:43.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:43.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:44.825: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:44.829: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:45.826: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:45.832: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:46.828: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. +May 6 07:46:46.832: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:47.827: INFO: Wrong image for pod: daemon-set-hfqkr. Expected: gcr.io/kubernetes-e2e-test-images/redis:1.0, got: gcr.io/kubernetes-e2e-test-images/serve-hostname:1.1. 
+May 6 07:46:47.827: INFO: Pod daemon-set-hfqkr is not available +May 6 07:46:47.835: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:48.824: INFO: Pod daemon-set-22jph is not available +May 6 07:46:48.828: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +STEP: Check that daemon pods are still running on every node of the cluster. +May 6 07:46:48.832: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:48.836: INFO: Number of nodes with available pods: 0 +May 6 07:46:48.836: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:49.840: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:49.844: INFO: Number of nodes with available pods: 0 +May 6 07:46:49.844: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:50.841: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:50.845: INFO: Number of nodes with available pods: 0 +May 6 07:46:50.845: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:51.840: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:51.843: INFO: Number of nodes with available pods: 0 +May 6 07:46:51.843: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:52.840: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:52.845: INFO: Number of nodes with available pods: 0 +May 6 07:46:52.845: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 07:46:53.841: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 07:46:53.846: INFO: Number of nodes with available pods: 1 +May 6 07:46:53.846: INFO: Number of running nodes: 1, number of available pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace 
e2e-tests-daemonsets-xp5wj, will wait for the garbage collector to delete the pods +May 6 07:46:53.924: INFO: Deleting DaemonSet.extensions daemon-set took: 6.704528ms +May 6 07:46:54.024: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.36577ms +May 6 07:47:00.228: INFO: Number of nodes with available pods: 0 +May 6 07:47:00.228: INFO: Number of running nodes: 0, number of available pods: 0 +May 6 07:47:00.231: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-xp5wj/daemonsets","resourceVersion":"6863"},"items":null} + +May 6 07:47:00.234: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-xp5wj/pods","resourceVersion":"6863"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:47:00.246: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-xp5wj" for this suite. +May 6 07:47:06.261: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:47:06.365: INFO: namespace: e2e-tests-daemonsets-xp5wj, resource: bindings, ignored listing per whitelist +May 6 07:47:06.370: INFO: namespace e2e-tests-daemonsets-xp5wj deletion completed in 6.120562983s + +• [SLOW TEST:55.791 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should update pod when spec was updated and update strategy is RollingUpdate [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Guestbook application + should create and stop a working application [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:47:06.371: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create and stop a working application [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating all guestbook components +May 6 07:47:06.545: INFO: apiVersion: v1 +kind: Service +metadata: + name: redis-slave + labels: + app: redis + role: slave + tier: backend +spec: + ports: + - port: 6379 + selector: + app: redis + role: slave + tier: backend + +May 6 07:47:06.545: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - 
--namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:06.937: INFO: stderr: "" +May 6 07:47:06.937: INFO: stdout: "service/redis-slave created\n" +May 6 07:47:06.938: INFO: apiVersion: v1 +kind: Service +metadata: + name: redis-master + labels: + app: redis + role: master + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: master + tier: backend + +May 6 07:47:06.944: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:07.240: INFO: stderr: "" +May 6 07:47:07.240: INFO: stdout: "service/redis-master created\n" +May 6 07:47:07.240: INFO: apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. + # type: LoadBalancer + ports: + - port: 80 + selector: + app: guestbook + tier: frontend + +May 6 07:47:07.240: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:07.528: INFO: stderr: "" +May 6 07:47:07.528: INFO: stdout: "service/frontend created\n" +May 6 07:47:07.528: INFO: apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: php-redis + image: gcr.io/google-samples/gb-frontend:v6 + resources: + requests: + cpu: 100m + memory: 100Mi + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access environment variables to find service host + # info, comment out the 'value: dns' line above, and uncomment the + # line below: + # value: env + ports: + - containerPort: 80 + +May 6 07:47:07.528: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:07.841: INFO: stderr: "" +May 6 07:47:07.841: INFO: stdout: "deployment.extensions/frontend created\n" +May 6 07:47:07.841: INFO: apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: redis-master +spec: + replicas: 1 + template: + metadata: + labels: + app: redis + role: master + tier: backend + spec: + containers: + - name: master + image: gcr.io/kubernetes-e2e-test-images/redis:1.0 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 + +May 6 07:47:07.841: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:08.186: INFO: stderr: "" +May 6 07:47:08.186: INFO: stdout: "deployment.extensions/redis-master created\n" +May 6 07:47:08.186: INFO: apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: redis-slave +spec: + replicas: 2 + template: + metadata: + labels: + app: redis + role: slave + tier: backend + spec: + containers: + - name: slave + image: gcr.io/google-samples/gb-redisslave:v3 + resources: + requests: + cpu: 100m + memory: 100Mi + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access an environment variable to find the master + # service's host, comment out the 'value: dns' line above, and + # uncomment the line below: + # value: env + ports: + - containerPort: 6379 + +May 6 07:47:08.186: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:08.520: INFO: stderr: "" +May 6 07:47:08.520: INFO: stdout: "deployment.extensions/redis-slave created\n" +STEP: validating guestbook app +May 6 07:47:08.520: INFO: Waiting for all frontend pods to be Running. +May 6 07:47:33.574: INFO: Waiting for frontend to serve content. +May 6 07:47:38.597: INFO: Failed to get response from guestbook. err: , response:
+Fatal error: Uncaught exception 'Predis\Connection\ConnectionException' with message 'Connection timed out [tcp://redis-slave:6379]' in /usr/local/lib/php/Predis/Connection/AbstractConnection.php:155 +Stack trace: +#0 /usr/local/lib/php/Predis/Connection/StreamConnection.php(128): Predis\Connection\AbstractConnection->onConnectionError('Connection time...', 110) +#1 /usr/local/lib/php/Predis/Connection/StreamConnection.php(178): Predis\Connection\StreamConnection->createStreamSocket(Object(Predis\Connection\Parameters), 'tcp://redis-sla...', 4) +#2 /usr/local/lib/php/Predis/Connection/StreamConnection.php(100): Predis\Connection\StreamConnection->tcpStreamInitializer(Object(Predis\Connection\Parameters)) +#3 /usr/local/lib/php/Predis/Connection/AbstractConnection.php(81): Predis\Connection\StreamConnection->createResource() +#4 /usr/local/lib/php/Predis/Connection/StreamConnection.php(258): Predis\Connection\AbstractConnection->connect() +#5 /usr/local/lib/php/Predis/Connection/AbstractConnection.php(180): Predis\Connection\Stre in /usr/local/lib/php/Predis/Connection/AbstractConnection.php on line 155
+ +May 6 07:47:43.614: INFO: Trying to add a new entry to the guestbook. +May 6 07:47:43.628: INFO: Verifying that added entry can be retrieved. +STEP: using delete to clean up resources +May 6 07:47:43.640: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:43.802: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:43.802: INFO: stdout: "service \"redis-slave\" force deleted\n" +STEP: using delete to clean up resources +May 6 07:47:43.802: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:43.935: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:43.935: INFO: stdout: "service \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +May 6 07:47:43.936: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:44.082: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:44.082: INFO: stdout: "service \"frontend\" force deleted\n" +STEP: using delete to clean up resources +May 6 07:47:44.085: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:44.245: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:44.245: INFO: stdout: "deployment.extensions \"frontend\" force deleted\n" +STEP: using delete to clean up resources +May 6 07:47:44.247: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:44.465: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:44.465: INFO: stdout: "deployment.extensions \"redis-master\" force deleted\n" +STEP: using delete to clean up resources +May 6 07:47:44.465: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wkptk' +May 6 07:47:44.639: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 07:47:44.639: INFO: stdout: "deployment.extensions \"redis-slave\" force deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:47:44.639: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-wkptk" for this suite. 
+May 6 07:48:22.667: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:48:22.787: INFO: namespace: e2e-tests-kubectl-wkptk, resource: bindings, ignored listing per whitelist +May 6 07:48:22.789: INFO: namespace e2e-tests-kubectl-wkptk deletion completed in 38.14147026s + +• [SLOW TEST:76.419 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Guestbook application + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create and stop a working application [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:48:22.790: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-map-52c649ae-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 07:48:22.928: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-6g4w2" to be "success or failure" +May 6 07:48:22.935: INFO: Pod "pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.256322ms +May 6 07:48:24.939: INFO: Pod "pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.01093967s +STEP: Saw pod success +May 6 07:48:24.939: INFO: Pod "pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:48:24.942: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: +STEP: delete the pod +May 6 07:48:24.973: INFO: Waiting for pod pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:48:24.977: INFO: Pod pod-projected-configmaps-52c702ca-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:48:24.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-6g4w2" for this suite. +May 6 07:48:31.016: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:48:31.088: INFO: namespace: e2e-tests-projected-6g4w2, resource: bindings, ignored listing per whitelist +May 6 07:48:31.147: INFO: namespace e2e-tests-projected-6g4w2 deletion completed in 6.145838676s + +• [SLOW TEST:8.357 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] ConfigMap + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:48:31.147: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name configmap-test-volume-57ca9747-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 07:48:31.333: INFO: Waiting up to 5m0s for pod "pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-s49ds" to be "success or failure" +May 6 07:48:31.339: INFO: Pod "pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 5.737054ms +May 6 07:48:33.344: INFO: Pod "pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.010481935s +STEP: Saw pod success +May 6 07:48:33.344: INFO: Pod "pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:48:33.349: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035 container configmap-volume-test: +STEP: delete the pod +May 6 07:48:33.368: INFO: Waiting for pod pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:48:33.378: INFO: Pod pod-configmaps-57cb69e0-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:48:33.378: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-s49ds" for this suite. +May 6 07:48:39.398: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:48:39.419: INFO: namespace: e2e-tests-configmap-s49ds, resource: bindings, ignored listing per whitelist +May 6 07:48:39.519: INFO: namespace e2e-tests-configmap-s49ds deletion completed in 6.137602267s + +• [SLOW TEST:8.372 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-network] Services + should provide secure master service [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Services + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:48:39.519: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename services +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-network] Services + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85 +[It] should provide secure master service [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [sig-network] Services + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:48:39.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-services-hrs78" for this suite. 
+May 6 07:48:45.678: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:48:45.693: INFO: namespace: e2e-tests-services-hrs78, resource: bindings, ignored listing per whitelist +May 6 07:48:45.809: INFO: namespace e2e-tests-services-hrs78 deletion completed in 6.150302649s +[AfterEach] [sig-network] Services + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90 + +• [SLOW TEST:6.290 seconds] +[sig-network] Services +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should provide secure master service [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:48:45.810: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on tmpfs +May 6 07:48:45.937: INFO: Waiting up to 5m0s for pod "pod-608015d5-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-qsmkm" to be "success or failure" +May 6 07:48:45.943: INFO: Pod "pod-608015d5-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.351175ms +May 6 07:48:47.948: INFO: Pod "pod-608015d5-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011438806s +STEP: Saw pod success +May 6 07:48:47.949: INFO: Pod "pod-608015d5-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:48:47.956: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-608015d5-6fd3-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:48:48.005: INFO: Waiting for pod pod-608015d5-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:48:48.022: INFO: Pod pod-608015d5-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:48:48.022: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-qsmkm" for this suite. 
+May 6 07:48:54.042: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:48:54.120: INFO: namespace: e2e-tests-emptydir-qsmkm, resource: bindings, ignored listing per whitelist +May 6 07:48:54.152: INFO: namespace e2e-tests-emptydir-qsmkm deletion completed in 6.125905038s + +• [SLOW TEST:8.343 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0644,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:48:54.153: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-657cb671-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 07:48:54.318: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-8f74v" to be "success or failure" +May 6 07:48:54.323: INFO: Pod "pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.792759ms +May 6 07:48:56.328: INFO: Pod "pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009499398s +May 6 07:48:58.333: INFO: Pod "pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.014444124s +STEP: Saw pod success +May 6 07:48:58.333: INFO: Pod "pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:48:58.336: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: +STEP: delete the pod +May 6 07:48:58.362: INFO: Waiting for pod pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:48:58.365: INFO: Pod pod-projected-configmaps-657eecc5-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:48:58.365: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-8f74v" for this suite. +May 6 07:49:04.385: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:49:04.482: INFO: namespace: e2e-tests-projected-8f74v, resource: bindings, ignored listing per whitelist +May 6 07:49:04.552: INFO: namespace e2e-tests-projected-8f74v deletion completed in 6.182725062s + +• [SLOW TEST:10.399 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl describe + should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:49:04.553: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 07:49:04.668: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 version --client' +May 6 07:49:04.764: INFO: stderr: "" +May 6 07:49:04.764: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.3\", GitCommit:\"721bfa751924da8d1680787490c54b9179b1fed0\", GitTreeState:\"clean\", BuildDate:\"2019-02-01T20:08:12Z\", GoVersion:\"go1.11.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n" +May 6 07:49:04.768: INFO: Running '/usr/local/bin/kubectl 
--kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-gsv5h' +May 6 07:49:05.078: INFO: stderr: "" +May 6 07:49:05.078: INFO: stdout: "replicationcontroller/redis-master created\n" +May 6 07:49:05.078: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-gsv5h' +May 6 07:49:05.414: INFO: stderr: "" +May 6 07:49:05.414: INFO: stdout: "service/redis-master created\n" +STEP: Waiting for Redis master to start. +May 6 07:49:06.419: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:49:06.419: INFO: Found 0 / 1 +May 6 07:49:07.419: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:49:07.419: INFO: Found 1 / 1 +May 6 07:49:07.419: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +May 6 07:49:07.423: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:49:07.423: INFO: ForEach: Found 1 pods from the filter. Now looping through them. +May 6 07:49:07.423: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 describe pod redis-master-h7zt9 --namespace=e2e-tests-kubectl-gsv5h' +May 6 07:49:07.568: INFO: stderr: "" +May 6 07:49:07.568: INFO: stdout: "Name: redis-master-h7zt9\nNamespace: e2e-tests-kubectl-gsv5h\nNode: kubernetes-cluster-2696-minion-0/10.0.0.19\nStart Time: Mon, 06 May 2019 07:49:05 +0000\nLabels: app=redis\n role=master\nAnnotations: \nStatus: Running\nIP: 10.100.112.122\nControlled By: ReplicationController/redis-master\nContainers:\n redis-master:\n Container ID: docker://83ae6e701c2dc7bf0338eb88c42232756235d686351535f0341efb68dae359ab\n Image: gcr.io/kubernetes-e2e-test-images/redis:1.0\n Image ID: docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830\n Port: 6379/TCP\n Host Port: 0/TCP\n State: Running\n Started: Mon, 06 May 2019 07:49:06 +0000\n Ready: True\n Restart Count: 0\n Environment: \n Mounts:\n /var/run/secrets/kubernetes.io/serviceaccount from default-token-x8lfs (ro)\nConditions:\n Type Status\n Initialized True \n Ready True \n ContainersReady True \n PodScheduled True \nVolumes:\n default-token-x8lfs:\n Type: Secret (a volume populated by a Secret)\n SecretName: default-token-x8lfs\n Optional: false\nQoS Class: BestEffort\nNode-Selectors: \nTolerations: \nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Scheduled 2s default-scheduler Successfully assigned e2e-tests-kubectl-gsv5h/redis-master-h7zt9 to kubernetes-cluster-2696-minion-0\n Normal Pulled 1s kubelet, kubernetes-cluster-2696-minion-0 Container image \"gcr.io/kubernetes-e2e-test-images/redis:1.0\" already present on machine\n Normal Created 1s kubelet, kubernetes-cluster-2696-minion-0 Created container\n Normal Started 1s kubelet, kubernetes-cluster-2696-minion-0 Started container\n" +May 6 07:49:07.568: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 describe rc redis-master --namespace=e2e-tests-kubectl-gsv5h' +May 6 07:49:07.714: INFO: stderr: "" +May 6 07:49:07.714: INFO: stdout: "Name: redis-master\nNamespace: e2e-tests-kubectl-gsv5h\nSelector: app=redis,role=master\nLabels: app=redis\n role=master\nAnnotations: \nReplicas: 1 current / 1 desired\nPods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n Labels: app=redis\n role=master\n Containers:\n redis-master:\n Image: gcr.io/kubernetes-e2e-test-images/redis:1.0\n Port: 6379/TCP\n Host Port: 0/TCP\n Environment: \n Mounts: \n Volumes: 
\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal SuccessfulCreate 2s replication-controller Created pod: redis-master-h7zt9\n" +May 6 07:49:07.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 describe service redis-master --namespace=e2e-tests-kubectl-gsv5h' +May 6 07:49:07.849: INFO: stderr: "" +May 6 07:49:07.849: INFO: stdout: "Name: redis-master\nNamespace: e2e-tests-kubectl-gsv5h\nLabels: app=redis\n role=master\nAnnotations: \nSelector: app=redis,role=master\nType: ClusterIP\nIP: 10.254.216.193\nPort: 6379/TCP\nTargetPort: redis-server/TCP\nEndpoints: 10.100.112.122:6379\nSession Affinity: None\nEvents: \n" +May 6 07:49:07.852: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 describe node kubernetes-cluster-2696-master-0' +May 6 07:49:08.006: INFO: stderr: "" +May 6 07:49:08.006: INFO: stdout: "Name: kubernetes-cluster-2696-master-0\nRoles: master\nLabels: beta.kubernetes.io/arch=amd64\n beta.kubernetes.io/instance-type=b7d20f15-82f1-4ed4-a12e-e60277fe955f\n beta.kubernetes.io/os=linux\n failure-domain.beta.kubernetes.io/zone=MS1\n kubernetes.io/hostname=kubernetes-cluster-2696-master-0\n node-role.kubernetes.io/master=\nAnnotations: alpha.kubernetes.io/provided-node-ip: 10.0.0.21\n node.alpha.kubernetes.io/ttl: 0\n volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp: Mon, 06 May 2019 07:07:01 +0000\nTaints: CriticalAddonsOnly=True:NoSchedule\n dedicated=master:NoSchedule\nUnschedulable: false\nConditions:\n Type Status LastHeartbeatTime LastTransitionTime Reason Message\n ---- ------ ----------------- ------------------ ------ -------\n MemoryPressure False Mon, 06 May 2019 07:49:04 +0000 Mon, 06 May 2019 07:06:45 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available\n DiskPressure False Mon, 06 May 2019 07:49:04 +0000 Mon, 06 May 2019 07:06:45 +0000 KubeletHasNoDiskPressure kubelet has no disk pressure\n PIDPressure False Mon, 06 May 2019 07:49:04 +0000 Mon, 06 May 2019 07:06:45 +0000 KubeletHasSufficientPID kubelet has sufficient PID available\n Ready True Mon, 06 May 2019 07:49:04 +0000 Mon, 06 May 2019 07:07:01 +0000 KubeletReady kubelet is posting ready status\nAddresses:\n InternalIP: 10.0.0.21\nCapacity:\n cpu: 2\n ephemeral-storage: 50162Mi\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 4038440Ki\n pods: 110\nAllocatable:\n cpu: 1800m\n ephemeral-storage: 43043835007\n hugepages-1Gi: 0\n hugepages-2Mi: 0\n memory: 3219240Ki\n pods: 110\nSystem Info:\n Machine ID: ba589e41354343558a433e0173ee5882\n System UUID: 27bd68be-c12a-447f-8b36-9db4fcfc560e\n Boot ID: 14e12d33-ffeb-46cd-96c0-5767c74afc39\n Kernel Version: 4.18.11-200.fc28.x86_64\n OS Image: Fedora 28 (Twenty Eight)\n Operating System: linux\n Architecture: amd64\n Container Runtime Version: docker://18.3.1\n Kubelet Version: v1.13.3\n Kube-Proxy Version: v1.13.3\nProviderID: openstack:///27bd68be-c12a-447f-8b36-9db4fcfc560e\nNon-terminated Pods: (7 in total)\n Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits AGE\n --------- ---- ------------ ---------- --------------- ------------- ---\n heptio-sonobuoy sonobuoy-systemd-logs-daemon-set-03c53cfc64d4424c-fkqhm 0 (0%) 0 (0%) 0 (0%) 0 (0%) 22m\n ingress-nginx default-http-backend-566d9c7d7b-x2pcx 10m (0%) 10m (0%) 20Mi (0%) 20Mi (0%) 41m\n ingress-nginx nginx-ingress-controller-686f5dd646-6wd2r 0 (0%) 0 (0%) 0 (0%) 0 (0%) 41m\n kube-system calico-node-qs65k 250m (13%) 0 (0%) 0 (0%) 0 (0%) 41m\n kube-system 
coredns-66f97d7c76-htjm4 0 (0%) 0 (0%) 0 (0%) 0 (0%) 41m\n kube-system kubernetes-dashboard-8987cccfb-qbw86 0 (0%) 0 (0%) 0 (0%) 0 (0%) 41m\n kube-system openstack-cloud-controller-manager-v9bp5 200m (11%) 0 (0%) 0 (0%) 0 (0%) 41m\nAllocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n Resource Requests Limits\n -------- -------- ------\n cpu 460m (25%) 10m (0%)\n memory 20Mi (0%) 20Mi (0%)\n ephemeral-storage 0 (0%) 0 (0%)\nEvents:\n Type Reason Age From Message\n ---- ------ ---- ---- -------\n Normal Starting 42m kubelet, kubernetes-cluster-2696-master-0 Starting kubelet.\n Normal NodeHasSufficientMemory 42m (x2 over 42m) kubelet, kubernetes-cluster-2696-master-0 Node kubernetes-cluster-2696-master-0 status is now: NodeHasSufficientMemory\n Normal NodeHasNoDiskPressure 42m (x2 over 42m) kubelet, kubernetes-cluster-2696-master-0 Node kubernetes-cluster-2696-master-0 status is now: NodeHasNoDiskPressure\n Normal NodeHasSufficientPID 42m (x2 over 42m) kubelet, kubernetes-cluster-2696-master-0 Node kubernetes-cluster-2696-master-0 status is now: NodeHasSufficientPID\n Normal NodeAllocatableEnforced 42m kubelet, kubernetes-cluster-2696-master-0 Updated Node Allocatable limit across pods\n Normal NodeReady 42m kubelet, kubernetes-cluster-2696-master-0 Node kubernetes-cluster-2696-master-0 status is now: NodeReady\n" +May 6 07:49:08.006: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 describe namespace e2e-tests-kubectl-gsv5h' +May 6 07:49:08.185: INFO: stderr: "" +May 6 07:49:08.185: INFO: stdout: "Name: e2e-tests-kubectl-gsv5h\nLabels: e2e-framework=kubectl\n e2e-run=62469e8e-6fd0-11e9-a235-ba138c0d9035\nAnnotations: \nStatus: Active\n\nNo resource quota.\n\nNo resource limits.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:49:08.185: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-gsv5h" for this suite. 
+May 6 07:49:30.224: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:49:30.292: INFO: namespace: e2e-tests-kubectl-gsv5h, resource: bindings, ignored listing per whitelist +May 6 07:49:30.401: INFO: namespace e2e-tests-kubectl-gsv5h deletion completed in 22.209790942s + +• [SLOW TEST:25.848 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl describe + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if kubectl describe prints relevant information for rc and pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-storage] ConfigMap + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:49:30.403: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name cm-test-opt-del-7b11512f-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating configMap with name cm-test-opt-upd-7b115182-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating the pod +STEP: Deleting configmap cm-test-opt-del-7b11512f-6fd3-11e9-a235-ba138c0d9035 +STEP: Updating configmap cm-test-opt-upd-7b115182-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating configMap with name cm-test-opt-create-7b1151a6-6fd3-11e9-a235-ba138c0d9035 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:50:49.144: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-qxjk9" for this suite. 
+May 6 07:51:11.161: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:11.244: INFO: namespace: e2e-tests-configmap-qxjk9, resource: bindings, ignored listing per whitelist +May 6 07:51:11.281: INFO: namespace e2e-tests-configmap-qxjk9 deletion completed in 22.132366407s + +• [SLOW TEST:100.879 seconds] +[sig-storage] ConfigMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-api-machinery] Secrets + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:11.283: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating secret e2e-tests-secrets-w27t5/secret-test-b734f7d7-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:51:11.417: INFO: Waiting up to 5m0s for pod "pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-w27t5" to be "success or failure" +May 6 07:51:11.428: INFO: Pod "pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 11.330405ms +May 6 07:51:13.438: INFO: Pod "pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020884014s +STEP: Saw pod success +May 6 07:51:13.438: INFO: Pod "pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:51:13.446: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035 container env-test: +STEP: delete the pod +May 6 07:51:13.485: INFO: Waiting for pod pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:51:13.489: INFO: Pod pod-configmaps-b735c4a8-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-api-machinery] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:13.489: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-w27t5" for this suite. 
+May 6 07:51:19.513: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:19.591: INFO: namespace: e2e-tests-secrets-w27t5, resource: bindings, ignored listing per whitelist +May 6 07:51:19.650: INFO: namespace e2e-tests-secrets-w27t5 deletion completed in 6.157171384s + +• [SLOW TEST:8.367 seconds] +[sig-api-machinery] Secrets +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:19.650: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: validating cluster-info +May 6 07:51:19.777: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 cluster-info' +May 6 07:51:20.534: INFO: stderr: "" +May 6 07:51:20.534: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://10.254.0.1:443\x1b[0m\n\x1b[0;32mCoreDNS\x1b[0m is running at \x1b[0;33mhttps://10.254.0.1:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:20.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-4hc6r" for this suite. 
+May 6 07:51:26.551: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:26.636: INFO: namespace: e2e-tests-kubectl-4hc6r, resource: bindings, ignored listing per whitelist +May 6 07:51:26.732: INFO: namespace e2e-tests-kubectl-4hc6r deletion completed in 6.192885047s + +• [SLOW TEST:7.082 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl cluster-info + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should check if Kubernetes master services is included in cluster-info [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should rollback without unnecessary restarts [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:26.737: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should rollback without unnecessary restarts [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 07:51:26.922: INFO: Requires at least 2 nodes (not -1) +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +May 6 07:51:26.928: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-fxbbt/daemonsets","resourceVersion":"7938"},"items":null} + +May 6 07:51:26.931: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-fxbbt/pods","resourceVersion":"7938"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:26.938: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-fxbbt" for this suite. 
+May 6 07:51:32.958: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:33.046: INFO: namespace: e2e-tests-daemonsets-fxbbt, resource: bindings, ignored listing per whitelist +May 6 07:51:33.095: INFO: namespace e2e-tests-daemonsets-fxbbt deletion completed in 6.152096451s + +S [SKIPPING] [6.358 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should rollback without unnecessary restarts [Conformance] [It] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 + + May 6 07:51:26.922: Requires at least 2 nodes (not -1) + + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/util.go:292 +------------------------------ +SSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:33.095: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0777 on tmpfs +May 6 07:51:33.300: INFO: Waiting up to 5m0s for pod "pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-6gvvl" to be "success or failure" +May 6 07:51:33.320: INFO: Pod "pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 20.13816ms +May 6 07:51:35.324: INFO: Pod "pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023890458s +May 6 07:51:37.329: INFO: Pod "pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029422797s +STEP: Saw pod success +May 6 07:51:37.329: INFO: Pod "pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:51:37.332: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:51:37.361: INFO: Waiting for pod pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:51:37.366: INFO: Pod pod-c4415b8a-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:37.366: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-6gvvl" for this suite. 
+May 6 07:51:43.384: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:43.486: INFO: namespace: e2e-tests-emptydir-6gvvl, resource: bindings, ignored listing per whitelist +May 6 07:51:43.497: INFO: namespace e2e-tests-emptydir-6gvvl deletion completed in 6.125445695s + +• [SLOW TEST:10.402 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:43.501: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 07:51:43.631: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-n47vp" to be "success or failure" +May 6 07:51:43.648: INFO: Pod "downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 16.788111ms +May 6 07:51:45.652: INFO: Pod "downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.020755762s +May 6 07:51:47.657: INFO: Pod "downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.025670244s +STEP: Saw pod success +May 6 07:51:47.657: INFO: Pod "downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:51:47.665: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 07:51:47.706: INFO: Waiting for pod downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:51:47.710: INFO: Pod downwardapi-volume-ca69d64a-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:47.710: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-n47vp" for this suite. +May 6 07:51:53.734: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:51:53.771: INFO: namespace: e2e-tests-downward-api-n47vp, resource: bindings, ignored listing per whitelist +May 6 07:51:53.875: INFO: namespace e2e-tests-downward-api-n47vp deletion completed in 6.158982292s + +• [SLOW TEST:10.375 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Projected downwardAPI + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:51:53.875: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 07:51:54.043: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-gr7w6" to be "success or failure" +May 6 07:51:54.050: INFO: Pod "downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.650219ms +May 6 07:51:56.055: INFO: Pod "downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.01243907s +STEP: Saw pod success +May 6 07:51:56.055: INFO: Pod "downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:51:56.058: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 07:51:56.103: INFO: Waiting for pod downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:51:56.107: INFO: Pod downwardapi-volume-d09e8c62-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:51:56.108: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-gr7w6" for this suite. +May 6 07:52:02.135: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:52:02.167: INFO: namespace: e2e-tests-projected-gr7w6, resource: bindings, ignored listing per whitelist +May 6 07:52:02.264: INFO: namespace e2e-tests-projected-gr7w6 deletion completed in 6.15169651s + +• [SLOW TEST:8.389 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should set mode on item file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSS +------------------------------ +[sig-apps] ReplicationController + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:52:02.265: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[It] should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating replication controller my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035 +May 6 07:52:02.380: INFO: Pod name my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035: Found 0 pods out of 1 +May 6 07:52:07.387: INFO: Pod name my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035: Found 1 pods out of 1 +May 6 07:52:07.387: INFO: Ensuring all pods for ReplicationController "my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035" are running +May 6 07:52:07.391: INFO: Pod "my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035-2t2b7" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 07:52:02 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC 
LastTransitionTime:2019-05-06 07:52:04 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 07:52:04 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 07:52:02 +0000 UTC Reason: Message:}]) +May 6 07:52:07.391: INFO: Trying to dial the pod +May 6 07:52:12.406: INFO: Controller my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035: Got expected result from replica 1 [my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035-2t2b7]: "my-hostname-basic-d59681a8-6fd3-11e9-a235-ba138c0d9035-2t2b7", 1 of 1 required successes so far +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:52:12.407: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replication-controller-l2rpb" for this suite. +May 6 07:52:18.425: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:52:18.547: INFO: namespace: e2e-tests-replication-controller-l2rpb, resource: bindings, ignored listing per whitelist +May 6 07:52:18.547: INFO: namespace e2e-tests-replication-controller-l2rpb deletion completed in 6.135984336s + +• [SLOW TEST:16.282 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should serve a basic image on each replica with a public image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:52:18.548: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on tmpfs +May 6 07:52:18.710: INFO: Waiting up to 5m0s for pod "pod-df52d040-6fd3-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-8f474" to be "success or failure" +May 6 07:52:18.716: INFO: Pod "pod-df52d040-6fd3-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 5.820836ms +May 6 07:52:20.722: INFO: Pod "pod-df52d040-6fd3-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.012098207s +STEP: Saw pod success +May 6 07:52:20.722: INFO: Pod "pod-df52d040-6fd3-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:52:20.725: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-df52d040-6fd3-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 07:52:20.752: INFO: Waiting for pod pod-df52d040-6fd3-11e9-a235-ba138c0d9035 to disappear +May 6 07:52:20.765: INFO: Pod pod-df52d040-6fd3-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:52:20.765: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-8f474" for this suite. +May 6 07:52:26.799: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:52:26.892: INFO: namespace: e2e-tests-emptydir-8f474, resource: bindings, ignored listing per whitelist +May 6 07:52:26.914: INFO: namespace e2e-tests-emptydir-8f474 deletion completed in 6.133446725s + +• [SLOW TEST:8.366 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Projected secret + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:52:26.915: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name s-test-opt-del-e44c22a4-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating secret with name s-test-opt-upd-e44c22f7-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating the pod +STEP: Deleting secret s-test-opt-del-e44c22a4-6fd3-11e9-a235-ba138c0d9035 +STEP: Updating secret s-test-opt-upd-e44c22f7-6fd3-11e9-a235-ba138c0d9035 +STEP: Creating secret with name s-test-opt-create-e44c2315-6fd3-11e9-a235-ba138c0d9035 +STEP: waiting to observe update in volume +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:53:47.716: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-wvn5k" for this suite. 
+May 6 07:54:09.750: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:54:09.844: INFO: namespace: e2e-tests-projected-wvn5k, resource: bindings, ignored listing per whitelist +May 6 07:54:09.891: INFO: namespace e2e-tests-projected-wvn5k deletion completed in 22.169661046s + +• [SLOW TEST:102.976 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + optional updates should be reflected in volume [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:54:09.893: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-lifecycle-hook +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61 +STEP: create the container to handle the HTTPGet hook request. 
+[It] should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: create the pod with lifecycle hook +STEP: check poststart hook +STEP: delete the pod with lifecycle hook +May 6 07:54:14.104: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:14.109: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:16.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:16.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:18.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:18.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:20.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:20.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:22.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:22.115: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:24.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:24.113: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:26.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:26.115: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:28.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:28.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:30.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:30.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:32.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:32.115: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:34.110: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:34.116: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:36.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:36.115: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:38.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:38.114: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:40.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:40.113: INFO: Pod pod-with-poststart-exec-hook still exists +May 6 07:54:42.109: INFO: Waiting for pod pod-with-poststart-exec-hook to disappear +May 6 07:54:42.114: INFO: Pod pod-with-poststart-exec-hook no longer exists +[AfterEach] [k8s.io] Container Lifecycle Hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:54:42.115: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-59nrc" for this suite. 
+May 6 07:55:04.134: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:55:04.172: INFO: namespace: e2e-tests-container-lifecycle-hook-59nrc, resource: bindings, ignored listing per whitelist +May 6 07:55:04.285: INFO: namespace e2e-tests-container-lifecycle-hook-59nrc deletion completed in 22.164713514s + +• [SLOW TEST:54.392 seconds] +[k8s.io] Container Lifecycle Hook +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when create a pod with lifecycle hook + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40 + should execute poststart exec hook properly [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:55:04.286: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-vfxvc +STEP: creating a selector +STEP: Creating the service pods in kubernetes +May 6 07:55:04.412: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +May 6 07:55:26.488: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.100.112.72:8080/dial?request=hostName&protocol=udp&host=10.100.112.71&port=8081&tries=1'] Namespace:e2e-tests-pod-network-test-vfxvc PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 07:55:26.489: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 07:55:26.664: INFO: Waiting for endpoints: map[] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:55:26.664: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-vfxvc" for this suite. 
+May 6 07:55:48.678: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:55:48.788: INFO: namespace: e2e-tests-pod-network-test-vfxvc, resource: bindings, ignored listing per whitelist +May 6 07:55:48.807: INFO: namespace e2e-tests-pod-network-test-vfxvc deletion completed in 22.138968671s + +• [SLOW TEST:44.522 seconds] +[sig-network] Networking +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for intra-pod communication: udp [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:55:48.808: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 07:55:48.925: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-mf775" to be "success or failure" +May 6 07:55:48.937: INFO: Pod "downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 12.364685ms +May 6 07:55:50.942: INFO: Pod "downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017177094s +May 6 07:55:52.947: INFO: Pod "downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.02192571s +STEP: Saw pod success +May 6 07:55:52.947: INFO: Pod "downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:55:52.950: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 07:55:52.977: INFO: Waiting for pod downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035 to disappear +May 6 07:55:52.984: INFO: Pod downwardapi-volume-5c9edfea-6fd4-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:55:52.984: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-mf775" for this suite. +May 6 07:55:59.011: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:55:59.040: INFO: namespace: e2e-tests-projected-mf775, resource: bindings, ignored listing per whitelist +May 6 07:55:59.131: INFO: namespace e2e-tests-projected-mf775 deletion completed in 6.142309767s + +• [SLOW TEST:10.323 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:55:59.131: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-t5rjv +STEP: creating a selector +STEP: Creating the service pods in kubernetes +May 6 07:55:59.273: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +May 6 07:56:19.378: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s --max-time 15 --connect-timeout 1 http://10.100.112.74:8080/hostName | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-t5rjv PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 07:56:19.378: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 07:56:19.548: INFO: Found all expected endpoints: 
[netserver-0] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:56:19.548: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-t5rjv" for this suite. +May 6 07:56:41.572: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:56:41.649: INFO: namespace: e2e-tests-pod-network-test-t5rjv, resource: bindings, ignored listing per whitelist +May 6 07:56:41.730: INFO: namespace e2e-tests-pod-network-test-t5rjv deletion completed in 22.178545716s + +• [SLOW TEST:42.598 seconds] +[sig-network] Networking +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for node-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[k8s.io] Pods + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:56:41.730: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +May 6 07:56:44.377: INFO: Successfully updated pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035" +May 6 07:56:44.377: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035" in namespace "e2e-tests-pods-s5bcn" to be "terminated due to deadline exceeded" +May 6 07:56:44.379: INFO: Pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035": Phase="Running", Reason="", readiness=true. Elapsed: 2.622174ms +May 6 07:56:46.383: INFO: Pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035": Phase="Running", Reason="", readiness=true. Elapsed: 2.006507642s +May 6 07:56:48.387: INFO: Pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035": Phase="Failed", Reason="DeadlineExceeded", readiness=false. 
Elapsed: 4.010628366s +May 6 07:56:48.387: INFO: Pod "pod-update-activedeadlineseconds-7c2a8b46-6fd4-11e9-a235-ba138c0d9035" satisfied condition "terminated due to deadline exceeded" +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:56:48.388: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-s5bcn" for this suite. +May 6 07:56:54.407: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:56:54.462: INFO: namespace: e2e-tests-pods-s5bcn, resource: bindings, ignored listing per whitelist +May 6 07:56:54.507: INFO: namespace e2e-tests-pods-s5bcn deletion completed in 6.114090124s + +• [SLOW TEST:12.777 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl expose + should create services for rc [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:56:54.507: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[It] should create services for rc [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating Redis RC +May 6 07:56:54.611: INFO: namespace e2e-tests-kubectl-sx55x +May 6 07:56:54.611: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-sx55x' +May 6 07:56:54.891: INFO: stderr: "" +May 6 07:56:54.891: INFO: stdout: "replicationcontroller/redis-master created\n" +STEP: Waiting for Redis master to start. +May 6 07:56:55.895: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:56:55.895: INFO: Found 0 / 1 +May 6 07:56:56.895: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:56:56.895: INFO: Found 0 / 1 +May 6 07:56:57.896: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:56:57.896: INFO: Found 1 / 1 +May 6 07:56:57.896: INFO: WaitFor completed with timeout 5m0s. Pods found = 1 out of 1 +May 6 07:56:57.899: INFO: Selector matched 1 pods for map[app:redis] +May 6 07:56:57.899: INFO: ForEach: Found 1 pods from the filter. Now looping through them. 
+May 6 07:56:57.899: INFO: wait on redis-master startup in e2e-tests-kubectl-sx55x +May 6 07:56:57.899: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 logs redis-master-mz894 redis-master --namespace=e2e-tests-kubectl-sx55x' +May 6 07:56:58.042: INFO: stderr: "" +May 6 07:56:58.042: INFO: stdout: " _._ \n _.-``__ ''-._ \n _.-`` `. `_. ''-._ Redis 3.2.12 (35a5711f/0) 64 bit\n .-`` .-```. ```\\/ _.,_ ''-._ \n ( ' , .-` | `, ) Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'| Port: 6379\n | `-._ `._ / _.-' | PID: 1\n `-._ `-._ `-./ _.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | http://redis.io \n `-._ `-._`-.__.-'_.-' _.-' \n |`-._`-._ `-.__.-' _.-'_.-'| \n | `-._`-._ _.-'_.-' | \n `-._ `-._`-.__.-'_.-' _.-' \n `-._ `-.__.-' _.-' \n `-._ _.-' \n `-.__.-' \n\n1:M 06 May 07:56:56.070 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 May 07:56:56.070 # Server started, Redis version 3.2.12\n1:M 06 May 07:56:56.070 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 May 07:56:56.070 * The server is now ready to accept connections on port 6379\n" +STEP: exposing RC +May 6 07:56:58.042: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 expose rc redis-master --name=rm2 --port=1234 --target-port=6379 --namespace=e2e-tests-kubectl-sx55x' +May 6 07:56:58.221: INFO: stderr: "" +May 6 07:56:58.221: INFO: stdout: "service/rm2 exposed\n" +May 6 07:56:58.225: INFO: Service rm2 in namespace e2e-tests-kubectl-sx55x found. +STEP: exposing service +May 6 07:57:00.232: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 expose service rm2 --name=rm3 --port=2345 --target-port=6379 --namespace=e2e-tests-kubectl-sx55x' +May 6 07:57:00.391: INFO: stderr: "" +May 6 07:57:00.391: INFO: stdout: "service/rm3 exposed\n" +May 6 07:57:00.394: INFO: Service rm3 in namespace e2e-tests-kubectl-sx55x found. +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:57:02.401: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-sx55x" for this suite. 
+May 6 07:57:24.418: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:57:24.533: INFO: namespace: e2e-tests-kubectl-sx55x, resource: bindings, ignored listing per whitelist +May 6 07:57:24.540: INFO: namespace e2e-tests-kubectl-sx55x deletion completed in 22.133842111s + +• [SLOW TEST:30.032 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl expose + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create services for rc [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Secrets + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:57:24.540: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename secrets +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating secret with name secret-test-95ae2cb4-6fd4-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:57:24.657: INFO: Waiting up to 5m0s for pod "pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-dg592" to be "success or failure" +May 6 07:57:24.662: INFO: Pod "pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.703496ms +May 6 07:57:26.666: INFO: Pod "pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.008362935s +STEP: Saw pod success +May 6 07:57:26.666: INFO: Pod "pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:57:26.668: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035 container secret-volume-test: +STEP: delete the pod +May 6 07:57:26.690: INFO: Waiting for pod pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035 to disappear +May 6 07:57:26.694: INFO: Pod pod-secrets-95aebbec-6fd4-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Secrets + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:57:26.694: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-secrets-dg592" for this suite. 
+May 6 07:57:32.711: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:57:32.746: INFO: namespace: e2e-tests-secrets-dg592, resource: bindings, ignored listing per whitelist +May 6 07:57:32.816: INFO: namespace e2e-tests-secrets-dg592 deletion completed in 6.118957916s + +• [SLOW TEST:8.276 seconds] +[sig-storage] Secrets +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34 + should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected secret + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:57:32.820: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating projection with secret that has name projected-secret-test-9a9e80fb-6fd4-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume secrets +May 6 07:57:32.945: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-4rnwh" to be "success or failure" +May 6 07:57:32.954: INFO: Pod "pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.124358ms +May 6 07:57:34.959: INFO: Pod "pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01317089s +STEP: Saw pod success +May 6 07:57:34.959: INFO: Pod "pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 07:57:34.962: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035 container projected-secret-volume-test: +STEP: delete the pod +May 6 07:57:34.992: INFO: Waiting for pod pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035 to disappear +May 6 07:57:34.995: INFO: Pod pod-projected-secrets-9a9f36dc-6fd4-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected secret + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:57:34.995: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-4rnwh" for this suite. 
+May 6 07:57:41.018: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:57:41.166: INFO: namespace: e2e-tests-projected-4rnwh, resource: bindings, ignored listing per whitelist +May 6 07:57:41.184: INFO: namespace e2e-tests-projected-4rnwh deletion completed in 6.182639975s + +• [SLOW TEST:8.365 seconds] +[sig-storage] Projected secret +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34 + should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Downward API volume + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:57:41.185: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating the pod +May 6 07:57:43.864: INFO: Successfully updated pod "annotationupdate9f9ae827-6fd4-11e9-a235-ba138c0d9035" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:57:47.910: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-nx7lh" for this suite. 
+May 6 07:58:09.929: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:58:10.085: INFO: namespace: e2e-tests-downward-api-nx7lh, resource: bindings, ignored listing per whitelist +May 6 07:58:10.085: INFO: namespace e2e-tests-downward-api-nx7lh deletion completed in 22.169258996s + +• [SLOW TEST:28.900 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[k8s.io] Pods + should be submitted and removed [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:58:10.085: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should be submitted and removed [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: setting up watch +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: verifying pod creation was observed +May 6 07:58:12.262: INFO: running pod: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-submit-remove-b0d4e6df-6fd4-11e9-a235-ba138c0d9035", GenerateName:"", Namespace:"e2e-tests-pods-5xpmk", SelfLink:"/api/v1/namespaces/e2e-tests-pods-5xpmk/pods/pod-submit-remove-b0d4e6df-6fd4-11e9-a235-ba138c0d9035", UID:"b0d9007e-6fd4-11e9-8e1b-fa163ee16beb", ResourceVersion:"9376", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63692726290, loc:(*time.Location)(0x7b47ba0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"200348979"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-pzf78", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc0018aaa40), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), 
FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container(nil), Containers:[]v1.Container{v1.Container{Name:"nginx", Image:"docker.io/library/nginx:1.14-alpine", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-pzf78", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc000abbee8), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"kubernetes-cluster-2696-minion-0", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc001b253e0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc000abbf0e)}, Status:v1.PodStatus{Phase:"Running", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692726290, loc:(*time.Location)(0x7b47ba0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"Ready", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692726292, loc:(*time.Location)(0x7b47ba0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"ContainersReady", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692726292, loc:(*time.Location)(0x7b47ba0)}}, Reason:"", Message:""}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, 
ext:63692726290, loc:(*time.Location)(0x7b47ba0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.0.0.19", PodIP:"10.100.112.80", StartTime:(*v1.Time)(0xc001483780), InitContainerStatuses:[]v1.ContainerStatus(nil), ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"nginx", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(0xc0014837a0), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:true, RestartCount:0, Image:"nginx:1.14-alpine", ImageID:"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7", ContainerID:"docker://58f6bd893d7d69572402057e8eb9bcbdac3ef5ed60c7c4383ceeced47c0c3267"}}, QOSClass:"BestEffort"}} +STEP: deleting the pod gracefully +STEP: verifying the kubelet observed the termination notice +STEP: verifying pod deletion was observed +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:58:20.157: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-5xpmk" for this suite. +May 6 07:58:26.171: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:58:26.210: INFO: namespace: e2e-tests-pods-5xpmk, resource: bindings, ignored listing per whitelist +May 6 07:58:26.309: INFO: namespace e2e-tests-pods-5xpmk deletion completed in 6.148811029s + +• [SLOW TEST:16.224 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be submitted and removed [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:58:26.310: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-mg8kf +[It] Scaling should happen in predictable order and halt if any stateful pod is 
unhealthy [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Initializing watcher for selector baz=blah,foo=bar +STEP: Creating stateful set ss in namespace e2e-tests-statefulset-mg8kf +STEP: Waiting until all stateful set ss replicas will be running in namespace e2e-tests-statefulset-mg8kf +May 6 07:58:26.460: INFO: Found 0 stateful pods, waiting for 1 +May 6 07:58:36.469: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +STEP: Confirming that stateful set scale up will halt with unhealthy stateful pod +May 6 07:58:36.477: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:58:36.753: INFO: stderr: "" +May 6 07:58:36.753: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:58:36.753: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:58:36.758: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=true +May 6 07:58:46.763: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:58:46.763: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:58:46.779: INFO: Verifying statefulset ss doesn't scale past 1 for another 9.999999403s +May 6 07:58:47.785: INFO: Verifying statefulset ss doesn't scale past 1 for another 8.995963731s +May 6 07:58:48.789: INFO: Verifying statefulset ss doesn't scale past 1 for another 7.990258834s +May 6 07:58:49.795: INFO: Verifying statefulset ss doesn't scale past 1 for another 6.986015031s +May 6 07:58:50.799: INFO: Verifying statefulset ss doesn't scale past 1 for another 5.980593021s +May 6 07:58:51.805: INFO: Verifying statefulset ss doesn't scale past 1 for another 4.976316846s +May 6 07:58:52.812: INFO: Verifying statefulset ss doesn't scale past 1 for another 3.970137368s +May 6 07:58:53.824: INFO: Verifying statefulset ss doesn't scale past 1 for another 2.963171808s +May 6 07:58:54.828: INFO: Verifying statefulset ss doesn't scale past 1 for another 1.951390036s +May 6 07:58:55.834: INFO: Verifying statefulset ss doesn't scale past 1 for another 947.082357ms +STEP: Scaling up stateful set ss to 3 replicas and waiting until all of them will be running in namespace e2e-tests-statefulset-mg8kf +May 6 07:58:56.841: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:58:57.126: INFO: stderr: "" +May 6 07:58:57.126: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:58:57.126: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:58:57.131: INFO: Found 1 stateful pods, waiting for 3 +May 6 07:59:07.135: INFO: Waiting for pod ss-0 to enter Running - Ready=true, currently Running - Ready=true +May 6 07:59:07.136: INFO: Waiting for pod ss-1 to enter Running - Ready=true, currently Running - Ready=true +May 6 07:59:07.136: INFO: Waiting for pod ss-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Verifying that stateful set ss was scaled up in 
order +STEP: Scale down will halt with unhealthy stateful pod +May 6 07:59:07.142: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-0 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:59:07.420: INFO: stderr: "" +May 6 07:59:07.420: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:59:07.420: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-0: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:59:07.420: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:59:07.675: INFO: stderr: "" +May 6 07:59:07.675: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:59:07.675: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:59:07.675: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-2 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true' +May 6 07:59:07.968: INFO: stderr: "" +May 6 07:59:07.968: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n" +May 6 07:59:07.968: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss-2: '/usr/share/nginx/html/index.html' -> '/tmp/index.html' + +May 6 07:59:07.968: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:59:07.973: INFO: Waiting for stateful set status.readyReplicas to become 0, currently 2 +May 6 07:59:17.986: INFO: Waiting for pod ss-0 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:59:17.986: INFO: Waiting for pod ss-1 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:59:17.986: INFO: Waiting for pod ss-2 to enter Running - Ready=false, currently Running - Ready=false +May 6 07:59:18.011: INFO: Verifying statefulset ss doesn't scale past 3 for another 9.999999304s +May 6 07:59:19.017: INFO: Verifying statefulset ss doesn't scale past 3 for another 8.9846035s +May 6 07:59:20.022: INFO: Verifying statefulset ss doesn't scale past 3 for another 7.978744366s +May 6 07:59:21.027: INFO: Verifying statefulset ss doesn't scale past 3 for another 6.974240297s +May 6 07:59:22.041: INFO: Verifying statefulset ss doesn't scale past 3 for another 5.968880751s +May 6 07:59:23.046: INFO: Verifying statefulset ss doesn't scale past 3 for another 4.955489749s +May 6 07:59:24.050: INFO: Verifying statefulset ss doesn't scale past 3 for another 3.949996373s +May 6 07:59:25.056: INFO: Verifying statefulset ss doesn't scale past 3 for another 2.945594562s +May 6 07:59:26.062: INFO: Verifying statefulset ss doesn't scale past 3 for another 1.939412562s +May 6 07:59:27.067: INFO: Verifying statefulset ss doesn't scale past 3 for another 933.763746ms +STEP: Scaling down stateful set ss to 0 replicas and waiting until none of pods will run in namespacee2e-tests-statefulset-mg8kf +May 6 07:59:28.073: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-0 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:59:28.355: INFO: stderr: "" +May 6 07:59:28.355: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" 
+May 6 07:59:28.355: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-0: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:59:28.355: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:59:28.620: INFO: stderr: "" +May 6 07:59:28.620: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:59:28.620: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:59:28.620: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-mg8kf ss-2 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true' +May 6 07:59:28.877: INFO: stderr: "" +May 6 07:59:28.877: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n" +May 6 07:59:28.877: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss-2: '/tmp/index.html' -> '/usr/share/nginx/html/index.html' + +May 6 07:59:28.877: INFO: Scaling statefulset ss to 0 +STEP: Verifying that stateful set ss was scaled down in reverse order +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 6 07:59:48.894: INFO: Deleting all statefulset in ns e2e-tests-statefulset-mg8kf +May 6 07:59:48.898: INFO: Scaling statefulset ss to 0 +May 6 07:59:48.913: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 07:59:48.920: INFO: Deleting statefulset ss +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:59:48.934: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-mg8kf" for this suite. 
+May 6 07:59:54.953: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 07:59:55.037: INFO: namespace: e2e-tests-statefulset-mg8kf, resource: bindings, ignored listing per whitelist +May 6 07:59:55.092: INFO: namespace e2e-tests-statefulset-mg8kf deletion completed in 6.153851477s + +• [SLOW TEST:88.783 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 07:59:55.095: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[It] should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 07:59:59.239: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-66p6c" for this suite. 
+May 6 08:00:41.260: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:00:41.385: INFO: namespace: e2e-tests-kubelet-test-66p6c, resource: bindings, ignored listing per whitelist +May 6 08:00:41.391: INFO: namespace e2e-tests-kubelet-test-66p6c deletion completed in 42.14795824s + +• [SLOW TEST:46.296 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox Pod with hostAliases + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:136 + should write entries to /etc/hosts [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-apps] Deployment + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:00:41.392: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 08:00:41.528: INFO: Creating replica set "test-rolling-update-controller" (going to be adopted) +May 6 08:00:41.554: INFO: Pod name sample-pod: Found 0 pods out of 1 +May 6 08:00:46.559: INFO: Pod name sample-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +May 6 08:00:46.559: INFO: Creating deployment "test-rolling-update-deployment" +May 6 08:00:46.566: INFO: Ensuring deployment "test-rolling-update-deployment" gets the next revision from the one the adopted replica set "test-rolling-update-controller" has +May 6 08:00:46.583: INFO: new replicaset for deployment "test-rolling-update-deployment" is yet to be created +May 6 08:00:48.606: INFO: Ensuring status for deployment "test-rolling-update-deployment" is the expected +May 6 08:00:48.609: INFO: Ensuring deployment "test-rolling-update-deployment" has one old replica set (the one it adopted) +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +May 6 08:00:48.624: INFO: Deployment "test-rolling-update-deployment": 
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment,GenerateName:,Namespace:e2e-tests-deployment-hn5zm,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hn5zm/deployments/test-rolling-update-deployment,UID:0e07aed5-6fd5-11e9-8e1b-fa163ee16beb,ResourceVersion:10005,Generation:1,CreationTimestamp:2019-05-06 08:00:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-06 08:00:46 +0000 UTC 2019-05-06 08:00:46 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-06 08:00:48 +0000 UTC 2019-05-06 08:00:46 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rolling-update-deployment-68b55d7bc6" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +May 6 08:00:48.631: INFO: New ReplicaSet "test-rolling-update-deployment-68b55d7bc6" of Deployment "test-rolling-update-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-68b55d7bc6,GenerateName:,Namespace:e2e-tests-deployment-hn5zm,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hn5zm/replicasets/test-rolling-update-deployment-68b55d7bc6,UID:0e0cc609-6fd5-11e9-8e1b-fa163ee16beb,ResourceVersion:9996,Generation:1,CreationTimestamp:2019-05-06 08:00:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 
68b55d7bc6,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305833,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 0e07aed5-6fd5-11e9-8e1b-fa163ee16beb 0xc0012e7ca7 0xc0012e7ca8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +May 6 08:00:48.631: INFO: All old ReplicaSets of Deployment "test-rolling-update-deployment": +May 6 08:00:48.631: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-controller,GenerateName:,Namespace:e2e-tests-deployment-hn5zm,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-hn5zm/replicasets/test-rolling-update-controller,UID:0b08170b-6fd5-11e9-8e1b-fa163ee16beb,ResourceVersion:10004,Generation:2,CreationTimestamp:2019-05-06 08:00:41 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 3546343826724305832,},OwnerReferences:[{apps/v1 Deployment test-rolling-update-deployment 0e07aed5-6fd5-11e9-8e1b-fa163ee16beb 0xc0012e7b97 0xc0012e7b98}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod: 
nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},} +May 6 08:00:48.635: INFO: Pod "test-rolling-update-deployment-68b55d7bc6-llm6w" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rolling-update-deployment-68b55d7bc6-llm6w,GenerateName:test-rolling-update-deployment-68b55d7bc6-,Namespace:e2e-tests-deployment-hn5zm,SelfLink:/api/v1/namespaces/e2e-tests-deployment-hn5zm/pods/test-rolling-update-deployment-68b55d7bc6-llm6w,UID:0e0d5c78-6fd5-11e9-8e1b-fa163ee16beb,ResourceVersion:9995,Generation:0,CreationTimestamp:2019-05-06 08:00:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod,pod-template-hash: 68b55d7bc6,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rolling-update-deployment-68b55d7bc6 0e0cc609-6fd5-11e9-8e1b-fa163ee16beb 0xc0024a3257 0xc0024a3258}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-qj47h {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-qj47h,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-qj47h true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:00:46 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:00:48 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:00:48 +0000 UTC } {PodScheduled True 
0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:00:46 +0000 UTC }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.89,StartTime:2019-05-06 08:00:46 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-06 08:00:47 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://9db68cfc3e78420bb8aa976c535829a519ab8123c50a081144e6204fca3fb944}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:00:48.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-hn5zm" for this suite. +May 6 08:00:54.668: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:00:54.783: INFO: namespace: e2e-tests-deployment-hn5zm, resource: bindings, ignored listing per whitelist +May 6 08:00:54.837: INFO: namespace e2e-tests-deployment-hn5zm deletion completed in 6.195265574s + +• [SLOW TEST:13.445 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + RollingUpdateDeployment should delete old pods and create new ones [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[k8s.io] Docker Containers + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:00:54.845: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test override all +May 6 08:00:54.971: INFO: Waiting up to 5m0s for pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035" in namespace "e2e-tests-containers-qzpfv" to be "success or failure" +May 6 08:00:54.976: INFO: Pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.288313ms +May 6 08:00:56.983: INFO: Pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.01127854s +May 6 08:00:58.988: INFO: Pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. 
Elapsed: 4.016087641s +May 6 08:01:01.014: INFO: Pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 6.04164161s +STEP: Saw pod success +May 6 08:01:01.014: INFO: Pod "client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:01:01.038: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 08:01:01.201: INFO: Waiting for pod client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035 to disappear +May 6 08:01:01.210: INFO: Pod client-containers-1309f95d-6fd5-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:01:01.210: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-containers-qzpfv" for this suite. +May 6 08:01:07.290: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:01:07.311: INFO: namespace: e2e-tests-containers-qzpfv, resource: bindings, ignored listing per whitelist +May 6 08:01:07.436: INFO: namespace e2e-tests-containers-qzpfv deletion completed in 6.21954062s + +• [SLOW TEST:12.592 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to override the image's default command and arguments [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] Probing container + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:01:07.436: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod liveness-exec in namespace e2e-tests-container-probe-vwzbj +May 6 08:01:09.637: INFO: Started pod liveness-exec in namespace e2e-tests-container-probe-vwzbj +STEP: checking the pod's current state and verifying that restartCount is present +May 6 08:01:09.642: INFO: Initial restart count of pod liveness-exec is 0 +STEP: deleting the pod +[AfterEach] [k8s.io] Probing container + 
/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:05:10.329: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-vwzbj" for this suite. +May 6 08:05:16.348: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:05:16.395: INFO: namespace: e2e-tests-container-probe-vwzbj, resource: bindings, ignored listing per whitelist +May 6 08:05:16.498: INFO: namespace e2e-tests-container-probe-vwzbj deletion completed in 6.164815806s + +• [SLOW TEST:249.062 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:05:16.499: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename statefulset +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59 +[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74 +STEP: Creating service test in namespace e2e-tests-statefulset-8rnk2 +[It] should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a new StaefulSet +May 6 08:05:16.666: INFO: Found 0 stateful pods, waiting for 3 +May 6 08:05:26.671: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +May 6 08:05:26.672: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +May 6 08:05:26.672: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Updating stateful set template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine +May 6 08:05:26.705: INFO: Updating stateful set ss2 +STEP: Creating a new revision +STEP: Not applying an update when the partition is greater than the number of replicas +STEP: Performing a canary update +May 6 08:05:36.753: INFO: Updating stateful set ss2 +May 6 08:05:36.777: INFO: 
Waiting for Pod e2e-tests-statefulset-8rnk2/ss2-2 to have revision ss2-c79899b9 update revision ss2-787997d666 +STEP: Restoring Pods to the correct revision when they are deleted +May 6 08:05:46.866: INFO: Found 2 stateful pods, waiting for 3 +May 6 08:05:56.871: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true +May 6 08:05:56.871: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true +May 6 08:05:56.871: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true +STEP: Performing a phased rolling update +May 6 08:05:56.905: INFO: Updating stateful set ss2 +May 6 08:05:56.935: INFO: Waiting for Pod e2e-tests-statefulset-8rnk2/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +May 6 08:06:06.944: INFO: Waiting for Pod e2e-tests-statefulset-8rnk2/ss2-1 to have revision ss2-c79899b9 update revision ss2-787997d666 +May 6 08:06:16.967: INFO: Updating stateful set ss2 +May 6 08:06:16.990: INFO: Waiting for StatefulSet e2e-tests-statefulset-8rnk2/ss2 to complete update +May 6 08:06:16.990: INFO: Waiting for Pod e2e-tests-statefulset-8rnk2/ss2-0 to have revision ss2-c79899b9 update revision ss2-787997d666 +[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85 +May 6 08:06:27.002: INFO: Deleting all statefulset in ns e2e-tests-statefulset-8rnk2 +May 6 08:06:27.006: INFO: Scaling statefulset ss2 to 0 +May 6 08:06:37.043: INFO: Waiting for statefulset status.replicas updated to 0 +May 6 08:06:37.048: INFO: Deleting statefulset ss2 +[AfterEach] [sig-apps] StatefulSet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:06:37.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-statefulset-8rnk2" for this suite. 
+May 6 08:06:43.122: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:06:43.212: INFO: namespace: e2e-tests-statefulset-8rnk2, resource: bindings, ignored listing per whitelist +May 6 08:06:43.264: INFO: namespace e2e-tests-statefulset-8rnk2 deletion completed in 6.163163756s + +• [SLOW TEST:86.765 seconds] +[sig-apps] StatefulSet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should perform canary updates and phased rolling updates of template modifications [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:06:43.265: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 08:06:43.403: INFO: Waiting up to 5m0s for pod "downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-spzmf" to be "success or failure" +May 6 08:06:43.409: INFO: Pod "downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 5.817702ms +May 6 08:06:45.415: INFO: Pod "downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.011648567s +STEP: Saw pod success +May 6 08:06:45.415: INFO: Pod "downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:06:45.419: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 08:06:45.458: INFO: Waiting for pod downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035 to disappear +May 6 08:06:45.463: INFO: Pod downwardapi-volume-e2b8360b-6fd5-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:06:45.463: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-spzmf" for this suite. +May 6 08:06:51.511: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:06:51.557: INFO: namespace: e2e-tests-projected-spzmf, resource: bindings, ignored listing per whitelist +May 6 08:06:51.621: INFO: namespace e2e-tests-projected-spzmf deletion completed in 6.146237725s + +• [SLOW TEST:8.357 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide container's cpu limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] KubeletManagedEtcHosts + should test kubelet managed /etc/hosts file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] KubeletManagedEtcHosts + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:06:51.622: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename e2e-kubelet-etc-hosts +STEP: Waiting for a default service account to be provisioned in namespace +[It] should test kubelet managed /etc/hosts file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Setting up the test +STEP: Creating hostNetwork=false pod +STEP: Creating hostNetwork=true pod +STEP: Running the test +STEP: Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false +May 6 08:06:57.828: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:57.828: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.001: INFO: Exec stderr: "" +May 6 08:06:58.001: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.001: INFO: >>> kubeConfig: 
/tmp/kubeconfig-307990706 +May 6 08:06:58.177: INFO: Exec stderr: "" +May 6 08:06:58.177: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.177: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.323: INFO: Exec stderr: "" +May 6 08:06:58.323: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.323: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.467: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount +May 6 08:06:58.467: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.467: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.598: INFO: Exec stderr: "" +May 6 08:06:58.598: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-pod ContainerName:busybox-3 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.598: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.752: INFO: Exec stderr: "" +STEP: Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true +May 6 08:06:58.752: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.752: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:58.919: INFO: Exec stderr: "" +May 6 08:06:58.919: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-host-network-pod ContainerName:busybox-1 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:58.919: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:59.071: INFO: Exec stderr: "" +May 6 08:06:59.071: INFO: ExecWithOptions {Command:[cat /etc/hosts] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:59.071: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:59.215: INFO: Exec stderr: "" +May 6 08:06:59.215: INFO: ExecWithOptions {Command:[cat /etc/hosts-original] Namespace:e2e-tests-e2e-kubelet-etc-hosts-9vth7 PodName:test-host-network-pod ContainerName:busybox-2 Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:06:59.215: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:06:59.369: INFO: Exec stderr: "" +[AfterEach] [k8s.io] KubeletManagedEtcHosts + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:06:59.370: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-e2e-kubelet-etc-hosts-9vth7" for this suite. 
+May 6 08:07:43.395: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:07:43.495: INFO: namespace: e2e-tests-e2e-kubelet-etc-hosts-9vth7, resource: bindings, ignored listing per whitelist +May 6 08:07:43.520: INFO: namespace e2e-tests-e2e-kubelet-etc-hosts-9vth7 deletion completed in 44.146412128s + +• [SLOW TEST:51.898 seconds] +[k8s.io] KubeletManagedEtcHosts +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should test kubelet managed /etc/hosts file [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:07:43.521: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-map-069e9b78-6fd6-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 08:07:43.659: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-whlvh" to be "success or failure" +May 6 08:07:43.673: INFO: Pod "pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 14.057419ms +May 6 08:07:45.680: INFO: Pod "pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.020420123s +STEP: Saw pod success +May 6 08:07:45.680: INFO: Pod "pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:07:45.682: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: +STEP: delete the pod +May 6 08:07:45.711: INFO: Waiting for pod pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:07:45.720: INFO: Pod pod-projected-configmaps-069f7051-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:07:45.720: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-whlvh" for this suite. 
+May 6 08:07:51.741: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:07:51.873: INFO: namespace: e2e-tests-projected-whlvh, resource: bindings, ignored listing per whitelist +May 6 08:07:51.882: INFO: namespace e2e-tests-projected-whlvh deletion completed in 6.157284912s + +• [SLOW TEST:8.361 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with mappings and Item mode set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[k8s.io] Probing container + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:07:51.885: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-probe +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48 +[It] with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 08:08:14.079: INFO: Container started at 2019-05-06 08:07:53 +0000 UTC, pod became ready at 2019-05-06 08:08:13 +0000 UTC +[AfterEach] [k8s.io] Probing container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:08:14.079: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-probe-l576w" for this suite. 
+May 6 08:08:36.094: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:08:36.228: INFO: namespace: e2e-tests-container-probe-l576w, resource: bindings, ignored listing per whitelist +May 6 08:08:36.308: INFO: namespace e2e-tests-container-probe-l576w deletion completed in 22.224985316s + +• [SLOW TEST:44.424 seconds] +[k8s.io] Probing container +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + with readiness probe should not be ready before initial delay and never restart [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should scale a replication controller [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:08:36.308: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295 +[It] should scale a replication controller [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a replication controller +May 6 08:08:36.455: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:37.572: INFO: stderr: "" +May 6 08:08:37.572: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +May 6 08:08:37.572: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:37.731: INFO: stderr: "" +May 6 08:08:37.731: INFO: stdout: "update-demo-nautilus-2rqk8 update-demo-nautilus-t2l8g " +May 6 08:08:37.731: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2rqk8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:37.878: INFO: stderr: "" +May 6 08:08:37.878: INFO: stdout: "" +May 6 08:08:37.878: INFO: update-demo-nautilus-2rqk8 is created but not running +May 6 08:08:42.878: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:43.012: INFO: stderr: "" +May 6 08:08:43.012: INFO: stdout: "update-demo-nautilus-2rqk8 update-demo-nautilus-t2l8g " +May 6 08:08:43.012: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2rqk8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:43.141: INFO: stderr: "" +May 6 08:08:43.141: INFO: stdout: "" +May 6 08:08:43.141: INFO: update-demo-nautilus-2rqk8 is created but not running +May 6 08:08:48.141: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:48.250: INFO: stderr: "" +May 6 08:08:48.250: INFO: stdout: "update-demo-nautilus-2rqk8 update-demo-nautilus-t2l8g " +May 6 08:08:48.250: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2rqk8 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:48.390: INFO: stderr: "" +May 6 08:08:48.391: INFO: stdout: "true" +May 6 08:08:48.391: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2rqk8 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:48.528: INFO: stderr: "" +May 6 08:08:48.528: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:08:48.528: INFO: validating pod update-demo-nautilus-2rqk8 +May 6 08:08:48.535: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:08:48.535: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:08:48.535: INFO: update-demo-nautilus-2rqk8 is verified up and running +May 6 08:08:48.536: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:48.686: INFO: stderr: "" +May 6 08:08:48.686: INFO: stdout: "true" +May 6 08:08:48.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:48.814: INFO: stderr: "" +May 6 08:08:48.814: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:08:48.814: INFO: validating pod update-demo-nautilus-t2l8g +May 6 08:08:48.821: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:08:48.821: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:08:48.821: INFO: update-demo-nautilus-t2l8g is verified up and running +STEP: scaling down the replication controller +May 6 08:08:48.823: INFO: scanned /root for discovery docs: +May 6 08:08:48.823: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 scale rc update-demo-nautilus --replicas=1 --timeout=5m --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:50.011: INFO: stderr: "" +May 6 08:08:50.011: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. +May 6 08:08:50.011: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:50.201: INFO: stderr: "" +May 6 08:08:50.201: INFO: stdout: "update-demo-nautilus-2rqk8 update-demo-nautilus-t2l8g " +STEP: Replicas for name=update-demo: expected=1 actual=2 +May 6 08:08:55.201: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:55.324: INFO: stderr: "" +May 6 08:08:55.324: INFO: stdout: "update-demo-nautilus-t2l8g " +May 6 08:08:55.324: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:55.468: INFO: stderr: "" +May 6 08:08:55.468: INFO: stdout: "true" +May 6 08:08:55.468: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:55.592: INFO: stderr: "" +May 6 08:08:55.592: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:08:55.592: INFO: validating pod update-demo-nautilus-t2l8g +May 6 08:08:55.597: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:08:55.597: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:08:55.597: INFO: update-demo-nautilus-t2l8g is verified up and running +STEP: scaling up the replication controller +May 6 08:08:55.599: INFO: scanned /root for discovery docs: +May 6 08:08:55.599: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 scale rc update-demo-nautilus --replicas=2 --timeout=5m --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:56.776: INFO: stderr: "" +May 6 08:08:56.776: INFO: stdout: "replicationcontroller/update-demo-nautilus scaled\n" +STEP: waiting for all containers in name=update-demo pods to come up. 
+May 6 08:08:56.776: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:56.965: INFO: stderr: "" +May 6 08:08:56.965: INFO: stdout: "update-demo-nautilus-8hxrz update-demo-nautilus-t2l8g " +May 6 08:08:56.966: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-8hxrz -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:08:57.090: INFO: stderr: "" +May 6 08:08:57.090: INFO: stdout: "" +May 6 08:08:57.090: INFO: update-demo-nautilus-8hxrz is created but not running +May 6 08:09:02.091: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.254: INFO: stderr: "" +May 6 08:09:02.254: INFO: stdout: "update-demo-nautilus-8hxrz update-demo-nautilus-t2l8g " +May 6 08:09:02.254: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-8hxrz -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.361: INFO: stderr: "" +May 6 08:09:02.361: INFO: stdout: "true" +May 6 08:09:02.361: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-8hxrz -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.517: INFO: stderr: "" +May 6 08:09:02.517: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:09:02.517: INFO: validating pod update-demo-nautilus-8hxrz +May 6 08:09:02.525: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:09:02.525: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:09:02.525: INFO: update-demo-nautilus-8hxrz is verified up and running +May 6 08:09:02.525: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.671: INFO: stderr: "" +May 6 08:09:02.671: INFO: stdout: "true" +May 6 08:09:02.671: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-t2l8g -o template --template={{if (exists . 
"spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.790: INFO: stderr: "" +May 6 08:09:02.790: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:09:02.790: INFO: validating pod update-demo-nautilus-t2l8g +May 6 08:09:02.796: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:09:02.796: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:09:02.796: INFO: update-demo-nautilus-t2l8g is verified up and running +STEP: using delete to clean up resources +May 6 08:09:02.796: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:02.923: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 08:09:02.923: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +May 6 08:09:02.923: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-8w4vn' +May 6 08:09:03.145: INFO: stderr: "No resources found.\n" +May 6 08:09:03.145: INFO: stdout: "" +May 6 08:09:03.145: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -l name=update-demo --namespace=e2e-tests-kubectl-8w4vn -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +May 6 08:09:03.342: INFO: stderr: "" +May 6 08:09:03.342: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:09:03.345: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-8w4vn" for this suite. 
+May 6 08:09:09.396: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:09:09.451: INFO: namespace: e2e-tests-kubectl-8w4vn, resource: bindings, ignored listing per whitelist +May 6 08:09:09.508: INFO: namespace e2e-tests-kubectl-8w4vn deletion completed in 6.136113751s + +• [SLOW TEST:33.199 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Update Demo + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should scale a replication controller [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:09:09.508: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0666 on tmpfs +May 6 08:09:09.635: INFO: Waiting up to 5m0s for pod "pod-39e15c04-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-69ztb" to be "success or failure" +May 6 08:09:09.642: INFO: Pod "pod-39e15c04-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.685027ms +May 6 08:09:11.647: INFO: Pod "pod-39e15c04-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.011796987s +STEP: Saw pod success +May 6 08:09:11.647: INFO: Pod "pod-39e15c04-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:09:11.650: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-39e15c04-6fd6-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 08:09:11.680: INFO: Waiting for pod pod-39e15c04-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:09:11.685: INFO: Pod pod-39e15c04-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:09:11.686: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-69ztb" for this suite. 
+May 6 08:09:17.729: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:09:17.821: INFO: namespace: e2e-tests-emptydir-69ztb, resource: bindings, ignored listing per whitelist +May 6 08:09:17.874: INFO: namespace e2e-tests-emptydir-69ztb deletion completed in 6.168876222s + +• [SLOW TEST:8.366 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0666,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-api-machinery] Watchers + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:09:17.874: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a watch on configmaps with a certain label +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: changing the label value of the configmap +STEP: Expecting to observe a delete notification for the watched object +May 6 08:09:18.060: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11707,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},} +May 6 08:09:18.060: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11708,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +May 6 08:09:18.061: INFO: Got : DELETED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11709,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},} +STEP: modifying the configmap a second time +STEP: Expecting not to observe a notification because the object no longer meets the selector's requirements +STEP: changing the label value of the configmap back +STEP: modifying the configmap a third time +STEP: deleting the configmap +STEP: Expecting to observe an add notification for the watched object when the label value was restored +May 6 08:09:28.106: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11730,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +May 6 08:09:28.106: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11731,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},} +May 6 08:09:28.106: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-label-changed,GenerateName:,Namespace:e2e-tests-watch-drdv7,SelfLink:/api/v1/namespaces/e2e-tests-watch-drdv7/configmaps/e2e-watch-test-label-changed,UID:3ee4ec82-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:11732,Generation:0,CreationTimestamp:2019-05-06 08:09:18 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: label-changed-and-restored,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:09:28.106: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-drdv7" for this suite. 
+May 6 08:09:34.124: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:09:34.145: INFO: namespace: e2e-tests-watch-drdv7, resource: bindings, ignored listing per whitelist +May 6 08:09:34.253: INFO: namespace e2e-tests-watch-drdv7 deletion completed in 6.140691817s + +• [SLOW TEST:16.379 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should observe an object deletion if it stops meeting the requirements of the selector [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-network] Networking Granular Checks: Pods + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:09:34.254: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pod-network-test +STEP: Waiting for a default service account to be provisioned in namespace +[It] should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-n7kq4 +STEP: creating a selector +STEP: Creating the service pods in kubernetes +May 6 08:09:34.399: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable +STEP: Creating test pods +May 6 08:09:56.486: INFO: ExecWithOptions {Command:[/bin/sh -c curl -g -q -s 'http://10.100.112.106:8080/dial?request=hostName&protocol=http&host=10.100.112.107&port=8080&tries=1'] Namespace:e2e-tests-pod-network-test-n7kq4 PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false} +May 6 08:09:56.486: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +May 6 08:09:56.669: INFO: Waiting for endpoints: map[] +[AfterEach] [sig-network] Networking + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:09:56.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pod-network-test-n7kq4" for this suite. 
+May 6 08:10:18.690: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:10:18.763: INFO: namespace: e2e-tests-pod-network-test-n7kq4, resource: bindings, ignored listing per whitelist +May 6 08:10:18.824: INFO: namespace e2e-tests-pod-network-test-n7kq4 deletion completed in 22.150506514s + +• [SLOW TEST:44.571 seconds] +[sig-network] Networking +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25 + Granular Checks: Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28 + should function for intra-pod communication: http [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-apps] Daemon set [Serial] + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:10:18.827: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a simple DaemonSet "daemon-set" +STEP: Check that daemon pods launch on every node of the cluster. 
+May 6 08:10:18.967: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:18.978: INFO: Number of nodes with available pods: 0 +May 6 08:10:18.978: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:19.988: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:19.993: INFO: Number of nodes with available pods: 0 +May 6 08:10:19.993: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:20.983: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:20.988: INFO: Number of nodes with available pods: 0 +May 6 08:10:20.988: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:21.983: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:21.987: INFO: Number of nodes with available pods: 1 +May 6 08:10:21.987: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived. +May 6 08:10:22.026: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:22.035: INFO: Number of nodes with available pods: 0 +May 6 08:10:22.035: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:23.040: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:23.045: INFO: Number of nodes with available pods: 0 +May 6 08:10:23.046: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:24.040: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:24.043: INFO: Number of nodes with available pods: 0 +May 6 08:10:24.043: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:10:25.042: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node +May 6 08:10:25.052: INFO: Number of nodes with available pods: 1 +May 6 08:10:25.052: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Wait for the failed daemon pod to be completely deleted. 
+[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-45qlq, will wait for the garbage collector to delete the pods +May 6 08:10:25.136: INFO: Deleting DaemonSet.extensions daemon-set took: 8.148749ms +May 6 08:10:25.237: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.319076ms +May 6 08:10:58.942: INFO: Number of nodes with available pods: 0 +May 6 08:10:58.942: INFO: Number of running nodes: 0, number of available pods: 0 +May 6 08:10:58.945: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-45qlq/daemonsets","resourceVersion":"12036"},"items":null} + +May 6 08:10:58.948: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-45qlq/pods","resourceVersion":"12036"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:10:58.955: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-45qlq" for this suite. +May 6 08:11:04.984: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:11:05.105: INFO: namespace: e2e-tests-daemonsets-45qlq, resource: bindings, ignored listing per whitelist +May 6 08:11:05.105: INFO: namespace e2e-tests-daemonsets-45qlq deletion completed in 6.145653251s + +• [SLOW TEST:46.278 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should retry creating failed daemon pods [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod + should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:11:05.106: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[BeforeEach] when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81 +[It] should have an terminated reason [NodeConformance] [Conformance] + 
/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:11:09.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-fnfmz" for this suite. +May 6 08:11:15.272: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:11:15.362: INFO: namespace: e2e-tests-kubelet-test-fnfmz, resource: bindings, ignored listing per whitelist +May 6 08:11:15.490: INFO: namespace e2e-tests-kubelet-test-fnfmz deletion completed in 6.230374997s + +• [SLOW TEST:10.384 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a busybox command that always fails in a pod + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78 + should have an terminated reason [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-api-machinery] Watchers + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:11:15.490: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename watch +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a new configmap +STEP: modifying the configmap once +STEP: modifying the configmap a second time +STEP: deleting the configmap +STEP: creating a watch on configmaps from the resource version returned by the first update +STEP: Expecting to observe notifications for all changes to the configmap after the first update +May 6 08:11:15.692: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-rv96w,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv96w/configmaps/e2e-watch-test-resource-version,UID:85007f52-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:12127,Generation:0,CreationTimestamp:2019-05-06 08:11:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +May 6 08:11:15.693: INFO: Got : DELETED 
&ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-resource-version,GenerateName:,Namespace:e2e-tests-watch-rv96w,SelfLink:/api/v1/namespaces/e2e-tests-watch-rv96w/configmaps/e2e-watch-test-resource-version,UID:85007f52-6fd6-11e9-8e1b-fa163ee16beb,ResourceVersion:12128,Generation:0,CreationTimestamp:2019-05-06 08:11:15 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: from-resource-version,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},} +[AfterEach] [sig-api-machinery] Watchers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:11:15.693: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-watch-rv96w" for this suite. +May 6 08:11:21.718: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:11:21.750: INFO: namespace: e2e-tests-watch-rv96w, resource: bindings, ignored listing per whitelist +May 6 08:11:21.834: INFO: namespace e2e-tests-watch-rv96w deletion completed in 6.137909013s + +• [SLOW TEST:6.344 seconds] +[sig-api-machinery] Watchers +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22 + should be able to start watching from a specific resource version [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSS +------------------------------ +[sig-storage] EmptyDir volumes + should support (non-root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:11:21.835: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (non-root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0777 on tmpfs +May 6 08:11:22.003: INFO: Waiting up to 5m0s for pod "pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-tjxbb" to be "success or failure" +May 6 08:11:22.014: INFO: Pod "pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 10.419271ms +May 6 08:11:24.018: INFO: Pod "pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.0150157s +May 6 08:11:26.022: INFO: Pod "pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 4.01855258s +STEP: Saw pod success +May 6 08:11:26.022: INFO: Pod "pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:11:26.026: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 08:11:26.057: INFO: Waiting for pod pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:11:26.062: INFO: Pod pod-88c2f3e2-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:11:26.062: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-tjxbb" for this suite. +May 6 08:11:32.088: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:11:32.107: INFO: namespace: e2e-tests-emptydir-tjxbb, resource: bindings, ignored listing per whitelist +May 6 08:11:32.305: INFO: namespace e2e-tests-emptydir-tjxbb deletion completed in 6.234663696s + +• [SLOW TEST:10.470 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (non-root,0777,tmpfs) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] Projected downwardAPI + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:11:32.305: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 08:11:32.515: INFO: Waiting up to 5m0s for pod "downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-hdh7h" to be "success or failure" +May 6 08:11:32.538: INFO: Pod "downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 22.930261ms +May 6 08:11:34.559: INFO: Pod "downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.043527087s +STEP: Saw pod success +May 6 08:11:34.561: INFO: Pod "downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:11:34.566: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 08:11:34.638: INFO: Waiting for pod downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:11:34.656: INFO: Pod downwardapi-volume-8f09b8bf-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:11:34.656: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-hdh7h" for this suite. +May 6 08:11:40.698: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:11:40.716: INFO: namespace: e2e-tests-projected-hdh7h, resource: bindings, ignored listing per whitelist +May 6 08:11:40.840: INFO: namespace e2e-tests-projected-hdh7h deletion completed in 6.16948732s + +• [SLOW TEST:8.535 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-network] Proxy version v1 + should proxy through a service and a pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] version v1 + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:11:40.841: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename proxy +STEP: Waiting for a default service account to be provisioned in namespace +[It] should proxy through a service and a pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: starting an echo server on multiple ports +STEP: creating replication controller proxy-service-5j64g in namespace e2e-tests-proxy-dqnkq +I0506 08:11:41.012348 14 runners.go:184] Created replication controller with name: proxy-service-5j64g, namespace: e2e-tests-proxy-dqnkq, replica count: 1 +I0506 08:11:42.065548 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0506 08:11:43.066040 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0506 08:11:44.066355 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 
runningButNotReady +I0506 08:11:45.066579 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0506 08:11:46.066908 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 0 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 1 runningButNotReady +I0506 08:11:47.067208 14 runners.go:184] proxy-service-5j64g Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +May 6 08:11:47.071: INFO: setup took 6.085879794s, starting test cases +STEP: running 16 cases, 20 attempts per case, 320 total attempts +May 6 08:11:47.081: INFO: (0) /api/v1/namespaces/e2e-tests-proxy-dqnkq/pods/proxy-service-5j64g-rmbk7:1080/proxy/: >> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename containers +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test override command +May 6 08:12:06.838: INFO: Waiting up to 5m0s for pod "client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-containers-2cbdq" to be "success or failure" +May 6 08:12:06.863: INFO: Pod "client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 24.786908ms +May 6 08:12:08.869: INFO: Pod "client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.030686455s +STEP: Saw pod success +May 6 08:12:08.869: INFO: Pod "client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:12:08.873: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 08:12:08.916: INFO: Waiting for pod client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:12:08.923: INFO: Pod client-containers-a37fef10-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [k8s.io] Docker Containers + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:12:08.923: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-containers-2cbdq" for this suite. 
+May 6 08:12:14.954: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:12:15.018: INFO: namespace: e2e-tests-containers-2cbdq, resource: bindings, ignored listing per whitelist +May 6 08:12:15.105: INFO: namespace e2e-tests-containers-2cbdq deletion completed in 6.170277118s + +• [SLOW TEST:8.552 seconds] +[k8s.io] Docker Containers +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSS +------------------------------ +[sig-node] Downward API + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] Downward API + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:12:15.107: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[It] should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward api env vars +May 6 08:12:15.231: INFO: Waiting up to 5m0s for pod "downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-dzkmg" to be "success or failure" +May 6 08:12:15.237: INFO: Pod "downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.053392ms +May 6 08:12:17.246: INFO: Pod "downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014377324s +May 6 08:12:19.251: INFO: Pod "downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019372094s +STEP: Saw pod success +May 6 08:12:19.251: INFO: Pod "downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:12:19.254: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035 container dapi-container: +STEP: delete the pod +May 6 08:12:19.280: INFO: Waiting for pod downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:12:19.285: INFO: Pod downward-api-a880d291-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-node] Downward API + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:12:19.285: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-dzkmg" for this suite. 
+May 6 08:12:25.309: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:12:25.463: INFO: namespace: e2e-tests-downward-api-dzkmg, resource: bindings, ignored listing per whitelist +May 6 08:12:25.491: INFO: namespace e2e-tests-downward-api-dzkmg deletion completed in 6.202305512s + +• [SLOW TEST:10.385 seconds] +[sig-node] Downward API +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38 + should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-network] Service endpoints latency + should not be very high [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-network] Service endpoints latency + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:12:25.493: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename svc-latency +STEP: Waiting for a default service account to be provisioned in namespace +[It] should not be very high [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating replication controller svc-latency-rc in namespace e2e-tests-svc-latency-nnnpb +I0506 08:12:25.612019 14 runners.go:184] Created replication controller with name: svc-latency-rc, namespace: e2e-tests-svc-latency-nnnpb, replica count: 1 +I0506 08:12:26.663734 14 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 0 running, 1 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +I0506 08:12:27.664066 14 runners.go:184] svc-latency-rc Pods: 1 out of 1 created, 1 running, 0 pending, 0 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady +May 6 08:12:27.776: INFO: Created: latency-svc-5p7cn +May 6 08:12:27.785: INFO: Got endpoints: latency-svc-5p7cn [20.484697ms] +May 6 08:12:27.805: INFO: Created: latency-svc-5c2h2 +May 6 08:12:27.811: INFO: Got endpoints: latency-svc-5c2h2 [25.647421ms] +May 6 08:12:27.839: INFO: Created: latency-svc-ttrt7 +May 6 08:12:27.866: INFO: Got endpoints: latency-svc-ttrt7 [77.728047ms] +May 6 08:12:27.879: INFO: Created: latency-svc-7b7nq +May 6 08:12:27.888: INFO: Got endpoints: latency-svc-7b7nq [101.805479ms] +May 6 08:12:27.900: INFO: Created: latency-svc-hwjl2 +May 6 08:12:27.910: INFO: Got endpoints: latency-svc-hwjl2 [123.731243ms] +May 6 08:12:27.912: INFO: Created: latency-svc-qzdr8 +May 6 08:12:27.926: INFO: Created: latency-svc-tfz6q +May 6 08:12:27.926: INFO: Got endpoints: latency-svc-qzdr8 [140.624896ms] +May 6 08:12:27.939: INFO: Got endpoints: latency-svc-tfz6q [152.06847ms] +May 6 08:12:27.947: INFO: Created: latency-svc-wsmkq +May 6 08:12:27.956: INFO: Got endpoints: latency-svc-wsmkq [168.521827ms] +May 6 08:12:27.965: INFO: Created: latency-svc-69wsp +May 6 08:12:27.973: INFO: Got endpoints: latency-svc-69wsp [186.053011ms] +May 6 08:12:27.987: INFO: 
Created: latency-svc-zb9pg +May 6 08:12:27.990: INFO: Got endpoints: latency-svc-zb9pg [202.226417ms] +May 6 08:12:27.998: INFO: Created: latency-svc-g9pct +May 6 08:12:28.011: INFO: Got endpoints: latency-svc-g9pct [222.65872ms] +May 6 08:12:28.014: INFO: Created: latency-svc-9dvdq +May 6 08:12:28.020: INFO: Got endpoints: latency-svc-9dvdq [231.224023ms] +May 6 08:12:28.029: INFO: Created: latency-svc-tj8r2 +May 6 08:12:28.038: INFO: Got endpoints: latency-svc-tj8r2 [249.6942ms] +May 6 08:12:28.043: INFO: Created: latency-svc-2htw4 +May 6 08:12:28.049: INFO: Got endpoints: latency-svc-2htw4 [260.524686ms] +May 6 08:12:28.054: INFO: Created: latency-svc-qlcjf +May 6 08:12:28.063: INFO: Created: latency-svc-7g4zx +May 6 08:12:28.065: INFO: Got endpoints: latency-svc-qlcjf [275.851ms] +May 6 08:12:28.090: INFO: Created: latency-svc-jg8tv +May 6 08:12:28.094: INFO: Got endpoints: latency-svc-7g4zx [304.709448ms] +May 6 08:12:28.108: INFO: Created: latency-svc-b8mkd +May 6 08:12:28.111: INFO: Got endpoints: latency-svc-b8mkd [245.027955ms] +May 6 08:12:28.129: INFO: Created: latency-svc-t9m6v +May 6 08:12:28.145: INFO: Got endpoints: latency-svc-t9m6v [256.486422ms] +May 6 08:12:28.145: INFO: Got endpoints: latency-svc-jg8tv [333.773836ms] +May 6 08:12:28.146: INFO: Created: latency-svc-66dxp +May 6 08:12:28.155: INFO: Got endpoints: latency-svc-66dxp [244.500061ms] +May 6 08:12:28.162: INFO: Created: latency-svc-wjqqg +May 6 08:12:28.174: INFO: Got endpoints: latency-svc-wjqqg [247.214125ms] +May 6 08:12:28.183: INFO: Created: latency-svc-mmh8w +May 6 08:12:28.202: INFO: Got endpoints: latency-svc-mmh8w [262.874456ms] +May 6 08:12:28.210: INFO: Created: latency-svc-h9zgp +May 6 08:12:28.220: INFO: Created: latency-svc-64kkn +May 6 08:12:28.220: INFO: Got endpoints: latency-svc-h9zgp [264.150788ms] +May 6 08:12:28.233: INFO: Got endpoints: latency-svc-64kkn [259.358679ms] +May 6 08:12:28.235: INFO: Created: latency-svc-g4x48 +May 6 08:12:28.251: INFO: Created: latency-svc-rsd7p +May 6 08:12:28.251: INFO: Got endpoints: latency-svc-g4x48 [261.420485ms] +May 6 08:12:28.276: INFO: Got endpoints: latency-svc-rsd7p [265.089978ms] +May 6 08:12:28.280: INFO: Created: latency-svc-85zkf +May 6 08:12:28.302: INFO: Got endpoints: latency-svc-85zkf [282.211703ms] +May 6 08:12:28.307: INFO: Created: latency-svc-pn5bm +May 6 08:12:28.322: INFO: Got endpoints: latency-svc-pn5bm [283.791244ms] +May 6 08:12:28.335: INFO: Created: latency-svc-4vp79 +May 6 08:12:28.355: INFO: Got endpoints: latency-svc-4vp79 [305.293118ms] +May 6 08:12:28.355: INFO: Created: latency-svc-hh6gp +May 6 08:12:28.355: INFO: Got endpoints: latency-svc-hh6gp [290.749036ms] +May 6 08:12:28.369: INFO: Created: latency-svc-r9pzc +May 6 08:12:28.385: INFO: Got endpoints: latency-svc-r9pzc [291.388594ms] +May 6 08:12:28.390: INFO: Created: latency-svc-gbn5z +May 6 08:12:28.417: INFO: Created: latency-svc-gngts +May 6 08:12:28.432: INFO: Got endpoints: latency-svc-gngts [286.666751ms] +May 6 08:12:28.432: INFO: Got endpoints: latency-svc-gbn5z [321.431414ms] +May 6 08:12:28.443: INFO: Created: latency-svc-t29sn +May 6 08:12:28.456: INFO: Created: latency-svc-b9hh6 +May 6 08:12:28.457: INFO: Got endpoints: latency-svc-t29sn [311.849691ms] +May 6 08:12:28.468: INFO: Created: latency-svc-497ns +May 6 08:12:28.484: INFO: Got endpoints: latency-svc-b9hh6 [328.586611ms] +May 6 08:12:28.497: INFO: Created: latency-svc-l5dbg +May 6 08:12:28.500: INFO: Got endpoints: latency-svc-497ns [326.221241ms] +May 6 08:12:28.510: INFO: Created: 
latency-svc-jpxkg +May 6 08:12:28.523: INFO: Got endpoints: latency-svc-jpxkg [303.306991ms] +May 6 08:12:28.524: INFO: Got endpoints: latency-svc-l5dbg [321.617735ms] +May 6 08:12:28.538: INFO: Created: latency-svc-44w4z +May 6 08:12:28.545: INFO: Got endpoints: latency-svc-44w4z [312.349775ms] +May 6 08:12:28.546: INFO: Created: latency-svc-gfzgl +May 6 08:12:28.551: INFO: Created: latency-svc-nr7cg +May 6 08:12:28.553: INFO: Got endpoints: latency-svc-gfzgl [302.065193ms] +May 6 08:12:28.558: INFO: Got endpoints: latency-svc-nr7cg [282.372511ms] +May 6 08:12:28.581: INFO: Created: latency-svc-t9nwc +May 6 08:12:28.581: INFO: Created: latency-svc-jml42 +May 6 08:12:28.584: INFO: Got endpoints: latency-svc-t9nwc [282.288112ms] +May 6 08:12:28.594: INFO: Got endpoints: latency-svc-jml42 [271.549568ms] +May 6 08:12:28.601: INFO: Created: latency-svc-mzbft +May 6 08:12:28.620: INFO: Created: latency-svc-dfkhl +May 6 08:12:28.626: INFO: Got endpoints: latency-svc-mzbft [271.407411ms] +May 6 08:12:28.631: INFO: Got endpoints: latency-svc-dfkhl [275.095603ms] +May 6 08:12:28.634: INFO: Created: latency-svc-c878m +May 6 08:12:28.651: INFO: Created: latency-svc-zzzg7 +May 6 08:12:28.653: INFO: Got endpoints: latency-svc-c878m [267.643086ms] +May 6 08:12:28.678: INFO: Got endpoints: latency-svc-zzzg7 [246.428034ms] +May 6 08:12:28.682: INFO: Created: latency-svc-tqpn4 +May 6 08:12:28.692: INFO: Created: latency-svc-sbkxx +May 6 08:12:28.692: INFO: Got endpoints: latency-svc-tqpn4 [259.95562ms] +May 6 08:12:28.712: INFO: Created: latency-svc-dr8t9 +May 6 08:12:28.716: INFO: Created: latency-svc-d676p +May 6 08:12:28.741: INFO: Created: latency-svc-wjcs7 +May 6 08:12:28.743: INFO: Got endpoints: latency-svc-sbkxx [286.599929ms] +May 6 08:12:28.749: INFO: Created: latency-svc-8hdx4 +May 6 08:12:28.768: INFO: Created: latency-svc-2td9d +May 6 08:12:28.782: INFO: Created: latency-svc-pcqhl +May 6 08:12:28.792: INFO: Got endpoints: latency-svc-dr8t9 [308.559434ms] +May 6 08:12:28.793: INFO: Created: latency-svc-kmxp5 +May 6 08:12:28.804: INFO: Created: latency-svc-gb4bl +May 6 08:12:28.817: INFO: Created: latency-svc-t6g72 +May 6 08:12:28.829: INFO: Created: latency-svc-shn8n +May 6 08:12:28.842: INFO: Created: latency-svc-kh74w +May 6 08:12:28.847: INFO: Got endpoints: latency-svc-d676p [346.532483ms] +May 6 08:12:28.866: INFO: Created: latency-svc-nhfb2 +May 6 08:12:28.883: INFO: Created: latency-svc-dfrwx +May 6 08:12:28.897: INFO: Created: latency-svc-gqkkk +May 6 08:12:28.897: INFO: Got endpoints: latency-svc-wjcs7 [373.365219ms] +May 6 08:12:28.917: INFO: Created: latency-svc-vzn88 +May 6 08:12:28.925: INFO: Created: latency-svc-mzvlw +May 6 08:12:28.938: INFO: Got endpoints: latency-svc-8hdx4 [414.301923ms] +May 6 08:12:28.945: INFO: Created: latency-svc-xbpfs +May 6 08:12:28.957: INFO: Created: latency-svc-8dgfm +May 6 08:12:28.975: INFO: Created: latency-svc-f56nf +May 6 08:12:28.995: INFO: Got endpoints: latency-svc-2td9d [449.815498ms] +May 6 08:12:29.016: INFO: Created: latency-svc-vg67s +May 6 08:12:29.034: INFO: Got endpoints: latency-svc-pcqhl [481.108931ms] +May 6 08:12:29.050: INFO: Created: latency-svc-pckwr +May 6 08:12:29.091: INFO: Got endpoints: latency-svc-kmxp5 [532.134972ms] +May 6 08:12:29.110: INFO: Created: latency-svc-pjn9h +May 6 08:12:29.133: INFO: Got endpoints: latency-svc-gb4bl [548.295561ms] +May 6 08:12:29.160: INFO: Created: latency-svc-s86j9 +May 6 08:12:29.183: INFO: Got endpoints: latency-svc-t6g72 [588.962052ms] +May 6 08:12:29.200: INFO: Created: 
latency-svc-tr4v7 +May 6 08:12:29.236: INFO: Got endpoints: latency-svc-shn8n [609.781641ms] +May 6 08:12:29.250: INFO: Created: latency-svc-848cq +May 6 08:12:29.284: INFO: Got endpoints: latency-svc-kh74w [652.908292ms] +May 6 08:12:29.294: INFO: Created: latency-svc-t7wcr +May 6 08:12:29.333: INFO: Got endpoints: latency-svc-nhfb2 [680.579128ms] +May 6 08:12:29.346: INFO: Created: latency-svc-8zt2z +May 6 08:12:29.383: INFO: Got endpoints: latency-svc-dfrwx [705.005335ms] +May 6 08:12:29.396: INFO: Created: latency-svc-cfbrl +May 6 08:12:29.433: INFO: Got endpoints: latency-svc-gqkkk [740.656605ms] +May 6 08:12:29.448: INFO: Created: latency-svc-gkrzp +May 6 08:12:29.486: INFO: Got endpoints: latency-svc-vzn88 [742.314162ms] +May 6 08:12:29.502: INFO: Created: latency-svc-lt52l +May 6 08:12:29.537: INFO: Got endpoints: latency-svc-mzvlw [744.380386ms] +May 6 08:12:29.551: INFO: Created: latency-svc-hz2gx +May 6 08:12:29.583: INFO: Got endpoints: latency-svc-xbpfs [736.786675ms] +May 6 08:12:29.598: INFO: Created: latency-svc-kxvbr +May 6 08:12:29.635: INFO: Got endpoints: latency-svc-8dgfm [738.450904ms] +May 6 08:12:29.665: INFO: Created: latency-svc-ghrww +May 6 08:12:29.684: INFO: Got endpoints: latency-svc-f56nf [745.968178ms] +May 6 08:12:29.699: INFO: Created: latency-svc-xbn6l +May 6 08:12:29.736: INFO: Got endpoints: latency-svc-vg67s [740.539056ms] +May 6 08:12:29.774: INFO: Created: latency-svc-bscpn +May 6 08:12:29.783: INFO: Got endpoints: latency-svc-pckwr [748.673125ms] +May 6 08:12:29.801: INFO: Created: latency-svc-dcx9t +May 6 08:12:29.834: INFO: Got endpoints: latency-svc-pjn9h [743.609209ms] +May 6 08:12:29.854: INFO: Created: latency-svc-9mzdv +May 6 08:12:29.883: INFO: Got endpoints: latency-svc-s86j9 [749.881977ms] +May 6 08:12:29.904: INFO: Created: latency-svc-qnpwx +May 6 08:12:29.933: INFO: Got endpoints: latency-svc-tr4v7 [750.289159ms] +May 6 08:12:29.954: INFO: Created: latency-svc-5ftfj +May 6 08:12:29.989: INFO: Got endpoints: latency-svc-848cq [752.997573ms] +May 6 08:12:30.050: INFO: Created: latency-svc-42xlv +May 6 08:12:30.051: INFO: Got endpoints: latency-svc-t7wcr [767.756264ms] +May 6 08:12:30.065: INFO: Created: latency-svc-sqlrq +May 6 08:12:30.089: INFO: Got endpoints: latency-svc-8zt2z [755.251492ms] +May 6 08:12:30.140: INFO: Created: latency-svc-8ht9s +May 6 08:12:30.165: INFO: Got endpoints: latency-svc-cfbrl [781.002167ms] +May 6 08:12:30.177: INFO: Created: latency-svc-5hvx5 +May 6 08:12:30.184: INFO: Got endpoints: latency-svc-gkrzp [751.069807ms] +May 6 08:12:30.198: INFO: Created: latency-svc-fh47v +May 6 08:12:30.238: INFO: Got endpoints: latency-svc-lt52l [752.283664ms] +May 6 08:12:30.252: INFO: Created: latency-svc-ckzhv +May 6 08:12:30.298: INFO: Got endpoints: latency-svc-hz2gx [761.345258ms] +May 6 08:12:30.315: INFO: Created: latency-svc-562kn +May 6 08:12:30.332: INFO: Got endpoints: latency-svc-kxvbr [748.651121ms] +May 6 08:12:30.358: INFO: Created: latency-svc-92t4k +May 6 08:12:30.397: INFO: Got endpoints: latency-svc-ghrww [762.116244ms] +May 6 08:12:30.410: INFO: Created: latency-svc-jwjgf +May 6 08:12:30.435: INFO: Got endpoints: latency-svc-xbn6l [750.446952ms] +May 6 08:12:30.455: INFO: Created: latency-svc-tqdqg +May 6 08:12:30.486: INFO: Got endpoints: latency-svc-bscpn [750.445219ms] +May 6 08:12:30.513: INFO: Created: latency-svc-lp7h4 +May 6 08:12:30.536: INFO: Got endpoints: latency-svc-dcx9t [752.599651ms] +May 6 08:12:30.551: INFO: Created: latency-svc-8tqdt +May 6 08:12:30.586: INFO: Got endpoints: 
latency-svc-9mzdv [751.620925ms] +May 6 08:12:30.603: INFO: Created: latency-svc-nv5n6 +May 6 08:12:30.635: INFO: Got endpoints: latency-svc-qnpwx [752.336797ms] +May 6 08:12:30.656: INFO: Created: latency-svc-dvjqf +May 6 08:12:30.685: INFO: Got endpoints: latency-svc-5ftfj [752.106124ms] +May 6 08:12:30.703: INFO: Created: latency-svc-68c9f +May 6 08:12:30.759: INFO: Got endpoints: latency-svc-42xlv [769.616283ms] +May 6 08:12:30.779: INFO: Created: latency-svc-fntbn +May 6 08:12:30.786: INFO: Got endpoints: latency-svc-sqlrq [734.50473ms] +May 6 08:12:30.807: INFO: Created: latency-svc-m794l +May 6 08:12:30.834: INFO: Got endpoints: latency-svc-8ht9s [744.927072ms] +May 6 08:12:30.852: INFO: Created: latency-svc-bbdlv +May 6 08:12:30.885: INFO: Got endpoints: latency-svc-5hvx5 [720.338678ms] +May 6 08:12:30.952: INFO: Created: latency-svc-xsjn5 +May 6 08:12:30.952: INFO: Got endpoints: latency-svc-fh47v [768.323643ms] +May 6 08:12:31.000: INFO: Created: latency-svc-n9485 +May 6 08:12:31.005: INFO: Got endpoints: latency-svc-ckzhv [767.20792ms] +May 6 08:12:31.023: INFO: Created: latency-svc-lfp9s +May 6 08:12:31.035: INFO: Got endpoints: latency-svc-562kn [736.200434ms] +May 6 08:12:31.060: INFO: Created: latency-svc-bpzqj +May 6 08:12:31.087: INFO: Got endpoints: latency-svc-92t4k [754.625107ms] +May 6 08:12:31.111: INFO: Created: latency-svc-sqkxd +May 6 08:12:31.135: INFO: Got endpoints: latency-svc-jwjgf [736.915853ms] +May 6 08:12:31.147: INFO: Created: latency-svc-m92tr +May 6 08:12:31.186: INFO: Got endpoints: latency-svc-tqdqg [751.482683ms] +May 6 08:12:31.219: INFO: Created: latency-svc-vmf69 +May 6 08:12:31.233: INFO: Got endpoints: latency-svc-lp7h4 [746.448206ms] +May 6 08:12:31.259: INFO: Created: latency-svc-56ms6 +May 6 08:12:31.285: INFO: Got endpoints: latency-svc-8tqdt [748.846844ms] +May 6 08:12:31.306: INFO: Created: latency-svc-mjvqh +May 6 08:12:31.337: INFO: Got endpoints: latency-svc-nv5n6 [751.003858ms] +May 6 08:12:31.355: INFO: Created: latency-svc-kjxp4 +May 6 08:12:31.384: INFO: Got endpoints: latency-svc-dvjqf [748.984114ms] +May 6 08:12:31.407: INFO: Created: latency-svc-69xwh +May 6 08:12:31.452: INFO: Got endpoints: latency-svc-68c9f [766.445826ms] +May 6 08:12:31.467: INFO: Created: latency-svc-mk9sn +May 6 08:12:31.484: INFO: Got endpoints: latency-svc-fntbn [725.616637ms] +May 6 08:12:31.507: INFO: Created: latency-svc-q2dbj +May 6 08:12:31.537: INFO: Got endpoints: latency-svc-m794l [750.612648ms] +May 6 08:12:31.552: INFO: Created: latency-svc-wrsdn +May 6 08:12:31.587: INFO: Got endpoints: latency-svc-bbdlv [753.559671ms] +May 6 08:12:31.604: INFO: Created: latency-svc-szj98 +May 6 08:12:31.636: INFO: Got endpoints: latency-svc-xsjn5 [750.704344ms] +May 6 08:12:31.650: INFO: Created: latency-svc-ct9b8 +May 6 08:12:31.683: INFO: Got endpoints: latency-svc-n9485 [730.633402ms] +May 6 08:12:31.701: INFO: Created: latency-svc-jsnpb +May 6 08:12:31.744: INFO: Got endpoints: latency-svc-lfp9s [738.959937ms] +May 6 08:12:31.759: INFO: Created: latency-svc-xjlhv +May 6 08:12:31.786: INFO: Got endpoints: latency-svc-bpzqj [751.041824ms] +May 6 08:12:31.798: INFO: Created: latency-svc-dpnfg +May 6 08:12:31.837: INFO: Got endpoints: latency-svc-sqkxd [749.591468ms] +May 6 08:12:31.850: INFO: Created: latency-svc-sbcts +May 6 08:12:31.887: INFO: Got endpoints: latency-svc-m92tr [752.167828ms] +May 6 08:12:31.905: INFO: Created: latency-svc-62sdm +May 6 08:12:31.944: INFO: Got endpoints: latency-svc-vmf69 [757.48926ms] +May 6 08:12:31.960: INFO: Created: 
latency-svc-q7x8x +May 6 08:12:31.986: INFO: Got endpoints: latency-svc-56ms6 [753.113398ms] +May 6 08:12:32.059: INFO: Created: latency-svc-ggqj4 +May 6 08:12:32.059: INFO: Got endpoints: latency-svc-mjvqh [774.282313ms] +May 6 08:12:32.078: INFO: Created: latency-svc-hmhpf +May 6 08:12:32.086: INFO: Got endpoints: latency-svc-kjxp4 [748.332183ms] +May 6 08:12:32.111: INFO: Created: latency-svc-7hj2b +May 6 08:12:32.157: INFO: Got endpoints: latency-svc-69xwh [772.258309ms] +May 6 08:12:32.191: INFO: Created: latency-svc-9ppf5 +May 6 08:12:32.197: INFO: Got endpoints: latency-svc-mk9sn [745.146341ms] +May 6 08:12:32.219: INFO: Created: latency-svc-lkp5p +May 6 08:12:32.236: INFO: Got endpoints: latency-svc-q2dbj [751.257207ms] +May 6 08:12:32.290: INFO: Created: latency-svc-fzdfr +May 6 08:12:32.296: INFO: Got endpoints: latency-svc-wrsdn [759.410408ms] +May 6 08:12:32.313: INFO: Created: latency-svc-dtvlr +May 6 08:12:32.335: INFO: Got endpoints: latency-svc-szj98 [747.675789ms] +May 6 08:12:32.354: INFO: Created: latency-svc-k7gds +May 6 08:12:32.386: INFO: Got endpoints: latency-svc-ct9b8 [749.681588ms] +May 6 08:12:32.410: INFO: Created: latency-svc-gg7c4 +May 6 08:12:32.434: INFO: Got endpoints: latency-svc-jsnpb [750.460804ms] +May 6 08:12:32.454: INFO: Created: latency-svc-9r4lv +May 6 08:12:32.485: INFO: Got endpoints: latency-svc-xjlhv [740.841865ms] +May 6 08:12:32.503: INFO: Created: latency-svc-z8phn +May 6 08:12:32.541: INFO: Got endpoints: latency-svc-dpnfg [754.545663ms] +May 6 08:12:32.562: INFO: Created: latency-svc-fhtxs +May 6 08:12:32.589: INFO: Got endpoints: latency-svc-sbcts [751.867961ms] +May 6 08:12:32.609: INFO: Created: latency-svc-hlkv2 +May 6 08:12:32.641: INFO: Got endpoints: latency-svc-62sdm [754.14285ms] +May 6 08:12:32.670: INFO: Created: latency-svc-jjh9j +May 6 08:12:32.686: INFO: Got endpoints: latency-svc-q7x8x [742.449455ms] +May 6 08:12:32.707: INFO: Created: latency-svc-vsf6r +May 6 08:12:32.739: INFO: Got endpoints: latency-svc-ggqj4 [753.060693ms] +May 6 08:12:32.766: INFO: Created: latency-svc-wqpdq +May 6 08:12:32.785: INFO: Got endpoints: latency-svc-hmhpf [725.007103ms] +May 6 08:12:32.800: INFO: Created: latency-svc-wqgw4 +May 6 08:12:32.834: INFO: Got endpoints: latency-svc-7hj2b [747.808278ms] +May 6 08:12:32.862: INFO: Created: latency-svc-rk4vk +May 6 08:12:32.885: INFO: Got endpoints: latency-svc-9ppf5 [727.874863ms] +May 6 08:12:32.899: INFO: Created: latency-svc-b95tr +May 6 08:12:32.935: INFO: Got endpoints: latency-svc-lkp5p [737.657892ms] +May 6 08:12:32.951: INFO: Created: latency-svc-5pshq +May 6 08:12:32.985: INFO: Got endpoints: latency-svc-fzdfr [748.251558ms] +May 6 08:12:33.003: INFO: Created: latency-svc-lxtx6 +May 6 08:12:33.036: INFO: Got endpoints: latency-svc-dtvlr [739.376902ms] +May 6 08:12:33.053: INFO: Created: latency-svc-bdtbd +May 6 08:12:33.086: INFO: Got endpoints: latency-svc-k7gds [750.190805ms] +May 6 08:12:33.101: INFO: Created: latency-svc-mcmgb +May 6 08:12:33.138: INFO: Got endpoints: latency-svc-gg7c4 [752.360756ms] +May 6 08:12:33.155: INFO: Created: latency-svc-cxl4m +May 6 08:12:33.191: INFO: Got endpoints: latency-svc-9r4lv [757.246136ms] +May 6 08:12:33.206: INFO: Created: latency-svc-84tfx +May 6 08:12:33.234: INFO: Got endpoints: latency-svc-z8phn [748.421278ms] +May 6 08:12:33.257: INFO: Created: latency-svc-hl5xx +May 6 08:12:33.285: INFO: Got endpoints: latency-svc-fhtxs [744.042849ms] +May 6 08:12:33.302: INFO: Created: latency-svc-g2xdd +May 6 08:12:33.333: INFO: Got endpoints: 
latency-svc-hlkv2 [743.915841ms] +May 6 08:12:33.357: INFO: Created: latency-svc-c4h8p +May 6 08:12:33.386: INFO: Got endpoints: latency-svc-jjh9j [744.384793ms] +May 6 08:12:33.415: INFO: Created: latency-svc-rk2d8 +May 6 08:12:33.436: INFO: Got endpoints: latency-svc-vsf6r [749.604058ms] +May 6 08:12:33.457: INFO: Created: latency-svc-pkrhp +May 6 08:12:33.488: INFO: Got endpoints: latency-svc-wqpdq [748.721213ms] +May 6 08:12:33.506: INFO: Created: latency-svc-tcnrz +May 6 08:12:33.534: INFO: Got endpoints: latency-svc-wqgw4 [749.723246ms] +May 6 08:12:33.556: INFO: Created: latency-svc-jshqf +May 6 08:12:33.587: INFO: Got endpoints: latency-svc-rk4vk [753.011605ms] +May 6 08:12:33.625: INFO: Created: latency-svc-mxbmh +May 6 08:12:33.652: INFO: Got endpoints: latency-svc-b95tr [767.369179ms] +May 6 08:12:33.675: INFO: Created: latency-svc-g8j2d +May 6 08:12:33.686: INFO: Got endpoints: latency-svc-5pshq [750.820769ms] +May 6 08:12:33.706: INFO: Created: latency-svc-tvsmd +May 6 08:12:33.734: INFO: Got endpoints: latency-svc-lxtx6 [748.841195ms] +May 6 08:12:33.754: INFO: Created: latency-svc-vdpl9 +May 6 08:12:33.787: INFO: Got endpoints: latency-svc-bdtbd [751.024444ms] +May 6 08:12:33.803: INFO: Created: latency-svc-ls96z +May 6 08:12:33.835: INFO: Got endpoints: latency-svc-mcmgb [749.091439ms] +May 6 08:12:33.852: INFO: Created: latency-svc-s66cg +May 6 08:12:33.889: INFO: Got endpoints: latency-svc-cxl4m [750.398238ms] +May 6 08:12:33.924: INFO: Created: latency-svc-vzvng +May 6 08:12:33.936: INFO: Got endpoints: latency-svc-84tfx [744.483302ms] +May 6 08:12:33.964: INFO: Created: latency-svc-bgf7k +May 6 08:12:34.038: INFO: Got endpoints: latency-svc-hl5xx [804.26239ms] +May 6 08:12:34.047: INFO: Got endpoints: latency-svc-g2xdd [761.824324ms] +May 6 08:12:34.064: INFO: Created: latency-svc-mp9lp +May 6 08:12:34.081: INFO: Created: latency-svc-krpms +May 6 08:12:34.084: INFO: Got endpoints: latency-svc-c4h8p [750.840901ms] +May 6 08:12:34.109: INFO: Created: latency-svc-zwjwt +May 6 08:12:34.155: INFO: Got endpoints: latency-svc-rk2d8 [769.046194ms] +May 6 08:12:34.194: INFO: Got endpoints: latency-svc-pkrhp [757.683106ms] +May 6 08:12:34.196: INFO: Created: latency-svc-tbb68 +May 6 08:12:34.219: INFO: Created: latency-svc-q5f4k +May 6 08:12:34.236: INFO: Got endpoints: latency-svc-tcnrz [747.852448ms] +May 6 08:12:34.262: INFO: Created: latency-svc-x8b59 +May 6 08:12:34.289: INFO: Got endpoints: latency-svc-jshqf [754.422797ms] +May 6 08:12:34.317: INFO: Created: latency-svc-s56w8 +May 6 08:12:34.335: INFO: Got endpoints: latency-svc-mxbmh [747.756229ms] +May 6 08:12:34.379: INFO: Created: latency-svc-snjwd +May 6 08:12:34.399: INFO: Got endpoints: latency-svc-g8j2d [746.14845ms] +May 6 08:12:34.467: INFO: Got endpoints: latency-svc-tvsmd [781.388223ms] +May 6 08:12:34.477: INFO: Created: latency-svc-fgk7v +May 6 08:12:34.505: INFO: Got endpoints: latency-svc-vdpl9 [770.639766ms] +May 6 08:12:34.520: INFO: Created: latency-svc-tw9pp +May 6 08:12:34.537: INFO: Created: latency-svc-59rmz +May 6 08:12:34.542: INFO: Got endpoints: latency-svc-ls96z [754.693494ms] +May 6 08:12:34.572: INFO: Created: latency-svc-hc6nd +May 6 08:12:34.589: INFO: Got endpoints: latency-svc-s66cg [753.707705ms] +May 6 08:12:34.616: INFO: Created: latency-svc-w27km +May 6 08:12:34.634: INFO: Got endpoints: latency-svc-vzvng [745.745211ms] +May 6 08:12:34.652: INFO: Created: latency-svc-6tjqk +May 6 08:12:34.684: INFO: Got endpoints: latency-svc-bgf7k [747.037613ms] +May 6 08:12:34.703: INFO: Created: 
latency-svc-qwvd2 +May 6 08:12:34.735: INFO: Got endpoints: latency-svc-mp9lp [696.203321ms] +May 6 08:12:34.757: INFO: Created: latency-svc-f89l8 +May 6 08:12:34.783: INFO: Got endpoints: latency-svc-krpms [736.385936ms] +May 6 08:12:34.807: INFO: Created: latency-svc-r6nhd +May 6 08:12:34.833: INFO: Got endpoints: latency-svc-zwjwt [749.070249ms] +May 6 08:12:34.849: INFO: Created: latency-svc-z4b4j +May 6 08:12:34.885: INFO: Got endpoints: latency-svc-tbb68 [730.033731ms] +May 6 08:12:34.904: INFO: Created: latency-svc-jnbhj +May 6 08:12:34.941: INFO: Got endpoints: latency-svc-q5f4k [747.125609ms] +May 6 08:12:34.956: INFO: Created: latency-svc-k9lcz +May 6 08:12:35.012: INFO: Got endpoints: latency-svc-x8b59 [776.038114ms] +May 6 08:12:35.061: INFO: Got endpoints: latency-svc-s56w8 [772.010704ms] +May 6 08:12:35.072: INFO: Created: latency-svc-xdl9l +May 6 08:12:35.084: INFO: Created: latency-svc-v74qj +May 6 08:12:35.087: INFO: Got endpoints: latency-svc-snjwd [752.596444ms] +May 6 08:12:35.105: INFO: Created: latency-svc-hcbnm +May 6 08:12:35.135: INFO: Got endpoints: latency-svc-fgk7v [736.146924ms] +May 6 08:12:35.150: INFO: Created: latency-svc-km2ws +May 6 08:12:35.185: INFO: Got endpoints: latency-svc-tw9pp [717.400451ms] +May 6 08:12:35.200: INFO: Created: latency-svc-rh2gc +May 6 08:12:35.233: INFO: Got endpoints: latency-svc-59rmz [728.176908ms] +May 6 08:12:35.246: INFO: Created: latency-svc-vjp5m +May 6 08:12:35.283: INFO: Got endpoints: latency-svc-hc6nd [741.014133ms] +May 6 08:12:35.318: INFO: Created: latency-svc-pfz9l +May 6 08:12:35.340: INFO: Got endpoints: latency-svc-w27km [750.817347ms] +May 6 08:12:35.367: INFO: Created: latency-svc-xccfx +May 6 08:12:35.414: INFO: Got endpoints: latency-svc-6tjqk [779.659218ms] +May 6 08:12:35.462: INFO: Got endpoints: latency-svc-qwvd2 [778.033507ms] +May 6 08:12:35.463: INFO: Created: latency-svc-x7rrc +May 6 08:12:35.484: INFO: Created: latency-svc-cbzhw +May 6 08:12:35.486: INFO: Got endpoints: latency-svc-f89l8 [750.696818ms] +May 6 08:12:35.506: INFO: Created: latency-svc-lqxts +May 6 08:12:35.538: INFO: Got endpoints: latency-svc-r6nhd [754.311826ms] +May 6 08:12:35.551: INFO: Created: latency-svc-pd2cm +May 6 08:12:35.584: INFO: Got endpoints: latency-svc-z4b4j [750.938656ms] +May 6 08:12:35.599: INFO: Created: latency-svc-tz2vk +May 6 08:12:35.639: INFO: Got endpoints: latency-svc-jnbhj [753.767953ms] +May 6 08:12:35.683: INFO: Got endpoints: latency-svc-k9lcz [741.400692ms] +May 6 08:12:35.752: INFO: Got endpoints: latency-svc-xdl9l [739.308535ms] +May 6 08:12:35.784: INFO: Got endpoints: latency-svc-v74qj [723.288921ms] +May 6 08:12:35.833: INFO: Got endpoints: latency-svc-hcbnm [746.073633ms] +May 6 08:12:35.885: INFO: Got endpoints: latency-svc-km2ws [749.941219ms] +May 6 08:12:35.936: INFO: Got endpoints: latency-svc-rh2gc [750.912007ms] +May 6 08:12:35.983: INFO: Got endpoints: latency-svc-vjp5m [750.469353ms] +May 6 08:12:36.033: INFO: Got endpoints: latency-svc-pfz9l [750.13005ms] +May 6 08:12:36.084: INFO: Got endpoints: latency-svc-xccfx [744.456932ms] +May 6 08:12:36.134: INFO: Got endpoints: latency-svc-x7rrc [719.202496ms] +May 6 08:12:36.191: INFO: Got endpoints: latency-svc-cbzhw [729.240121ms] +May 6 08:12:36.240: INFO: Got endpoints: latency-svc-lqxts [753.818508ms] +May 6 08:12:36.286: INFO: Got endpoints: latency-svc-pd2cm [748.458451ms] +May 6 08:12:36.334: INFO: Got endpoints: latency-svc-tz2vk [750.06211ms] +May 6 08:12:36.334: INFO: Latencies: [25.647421ms 77.728047ms 101.805479ms 123.731243ms 
140.624896ms 152.06847ms 168.521827ms 186.053011ms 202.226417ms 222.65872ms 231.224023ms 244.500061ms 245.027955ms 246.428034ms 247.214125ms 249.6942ms 256.486422ms 259.358679ms 259.95562ms 260.524686ms 261.420485ms 262.874456ms 264.150788ms 265.089978ms 267.643086ms 271.407411ms 271.549568ms 275.095603ms 275.851ms 282.211703ms 282.288112ms 282.372511ms 283.791244ms 286.599929ms 286.666751ms 290.749036ms 291.388594ms 302.065193ms 303.306991ms 304.709448ms 305.293118ms 308.559434ms 311.849691ms 312.349775ms 321.431414ms 321.617735ms 326.221241ms 328.586611ms 333.773836ms 346.532483ms 373.365219ms 414.301923ms 449.815498ms 481.108931ms 532.134972ms 548.295561ms 588.962052ms 609.781641ms 652.908292ms 680.579128ms 696.203321ms 705.005335ms 717.400451ms 719.202496ms 720.338678ms 723.288921ms 725.007103ms 725.616637ms 727.874863ms 728.176908ms 729.240121ms 730.033731ms 730.633402ms 734.50473ms 736.146924ms 736.200434ms 736.385936ms 736.786675ms 736.915853ms 737.657892ms 738.450904ms 738.959937ms 739.308535ms 739.376902ms 740.539056ms 740.656605ms 740.841865ms 741.014133ms 741.400692ms 742.314162ms 742.449455ms 743.609209ms 743.915841ms 744.042849ms 744.380386ms 744.384793ms 744.456932ms 744.483302ms 744.927072ms 745.146341ms 745.745211ms 745.968178ms 746.073633ms 746.14845ms 746.448206ms 747.037613ms 747.125609ms 747.675789ms 747.756229ms 747.808278ms 747.852448ms 748.251558ms 748.332183ms 748.421278ms 748.458451ms 748.651121ms 748.673125ms 748.721213ms 748.841195ms 748.846844ms 748.984114ms 749.070249ms 749.091439ms 749.591468ms 749.604058ms 749.681588ms 749.723246ms 749.881977ms 749.941219ms 750.06211ms 750.13005ms 750.190805ms 750.289159ms 750.398238ms 750.445219ms 750.446952ms 750.460804ms 750.469353ms 750.612648ms 750.696818ms 750.704344ms 750.817347ms 750.820769ms 750.840901ms 750.912007ms 750.938656ms 751.003858ms 751.024444ms 751.041824ms 751.069807ms 751.257207ms 751.482683ms 751.620925ms 751.867961ms 752.106124ms 752.167828ms 752.283664ms 752.336797ms 752.360756ms 752.596444ms 752.599651ms 752.997573ms 753.011605ms 753.060693ms 753.113398ms 753.559671ms 753.707705ms 753.767953ms 753.818508ms 754.14285ms 754.311826ms 754.422797ms 754.545663ms 754.625107ms 754.693494ms 755.251492ms 757.246136ms 757.48926ms 757.683106ms 759.410408ms 761.345258ms 761.824324ms 762.116244ms 766.445826ms 767.20792ms 767.369179ms 767.756264ms 768.323643ms 769.046194ms 769.616283ms 770.639766ms 772.010704ms 772.258309ms 774.282313ms 776.038114ms 778.033507ms 779.659218ms 781.002167ms 781.388223ms 804.26239ms] +May 6 08:12:36.335: INFO: 50 %ile: 745.745211ms +May 6 08:12:36.335: INFO: 90 %ile: 761.345258ms +May 6 08:12:36.335: INFO: 99 %ile: 781.388223ms +May 6 08:12:36.335: INFO: Total sample count: 200 +[AfterEach] [sig-network] Service endpoints latency + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:12:36.335: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-svc-latency-nnnpb" for this suite. 
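The latency figures above come from the framework repeatedly creating services over a running replication controller and timing how long endpoints take to appear. A rough manual analogue is sketched below; it is not the framework's measurement code, `my-rc` stands in for any existing replication controller, and the service name is a placeholder.

```bash
# Sketch: time how long it takes for a new service to get its first endpoint.
kubectl expose rc my-rc --name=latency-probe --port=80
time ( until kubectl get endpoints latency-probe \
         -o jsonpath='{.subsets[0].addresses[0].ip}' 2>/dev/null | grep -q . ; do
         sleep 0.2
       done )
kubectl delete service latency-probe
```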
+May 6 08:13:02.362: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:13:02.397: INFO: namespace: e2e-tests-svc-latency-nnnpb, resource: bindings, ignored listing per whitelist +May 6 08:13:02.525: INFO: namespace e2e-tests-svc-latency-nnnpb deletion completed in 26.179005626s + +• [SLOW TEST:37.033 seconds] +[sig-network] Service endpoints latency +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22 + should not be very high [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSS +------------------------------ +[k8s.io] Kubelet when scheduling a read only busybox container + should not write to root filesystem [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:13:02.526: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubelet-test +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37 +[It] should not write to root filesystem [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[AfterEach] [k8s.io] Kubelet + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:13:04.708: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubelet-test-k2jsv" for this suite. 
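The read-only root filesystem behaviour checked above can be exercised directly with a container-level security context. A minimal sketch, with a placeholder pod name and busybox image:

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: readonly-fs-demo           # placeholder name
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: busybox                 # placeholder image
    command: ["/bin/sh", "-c", "touch /should-fail || echo 'root filesystem is read-only'"]
    securityContext:
      readOnlyRootFilesystem: true
EOF
# Expect the fallback message, since writes to / are rejected.
kubectl logs readonly-fs-demo
```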
+May 6 08:13:54.728: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:13:54.870: INFO: namespace: e2e-tests-kubelet-test-k2jsv, resource: bindings, ignored listing per whitelist +May 6 08:13:54.874: INFO: namespace e2e-tests-kubelet-test-k2jsv deletion completed in 50.160604357s + +• [SLOW TEST:52.348 seconds] +[k8s.io] Kubelet +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + when scheduling a read only busybox container + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:186 + should not write to root filesystem [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-node] ConfigMap + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-node] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:13:54.875: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename configmap +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap e2e-tests-configmap-6fx69/configmap-test-e3fe7889-6fd6-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 08:13:55.046: INFO: Waiting up to 5m0s for pod "pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-6fx69" to be "success or failure" +May 6 08:13:55.059: INFO: Pod "pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.674243ms +May 6 08:13:57.065: INFO: Pod "pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013874344s +STEP: Saw pod success +May 6 08:13:57.065: INFO: Pod "pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:13:57.072: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035 container env-test: +STEP: delete the pod +May 6 08:13:57.104: INFO: Waiting for pod pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:13:57.108: INFO: Pod pod-configmaps-e3ff278b-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-node] ConfigMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:13:57.108: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-configmap-6fx69" for this suite. 
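Consuming a ConfigMap through the environment, as this test does, boils down to a configMapKeyRef in the pod's env. The sketch below uses placeholder names and a busybox image rather than the suite's own objects.

```bash
# Placeholder ConfigMap with a single key.
kubectl create configmap env-demo-config --from-literal=data-1=value-1
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: configmap-env-demo         # placeholder name
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: busybox                 # placeholder image
    command: ["/bin/sh", "-c", "echo $CONFIG_DATA_1"]
    env:
    - name: CONFIG_DATA_1
      valueFrom:
        configMapKeyRef:
          name: env-demo-config
          key: data-1
EOF
# The log should print "value-1".
kubectl logs configmap-env-demo
```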
+May 6 08:14:03.156: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:14:03.299: INFO: namespace: e2e-tests-configmap-6fx69, resource: bindings, ignored listing per whitelist +May 6 08:14:03.325: INFO: namespace e2e-tests-configmap-6fx69 deletion completed in 6.206041485s + +• [SLOW TEST:8.449 seconds] +[sig-node] ConfigMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31 + should be consumable via the environment [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[k8s.io] InitContainer [NodeConformance] + should invoke init containers on a RestartNever pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:14:03.325: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename init-container +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43 +[It] should invoke init containers on a RestartNever pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +May 6 08:14:03.459: INFO: PodSpec: initContainers in spec.initContainers +[AfterEach] [k8s.io] InitContainer [NodeConformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:14:07.274: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-init-container-zbp7t" for this suite. 
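Init containers on a RestartNever pod run to completion, in order, before the main container starts, which is what the test above verifies through the pod's status. A minimal sketch with placeholder names and image:

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: init-demo                  # placeholder name
spec:
  restartPolicy: Never
  initContainers:
  - name: init-step
    image: busybox                 # placeholder image
    command: ["/bin/sh", "-c", "echo init ran"]
  containers:
  - name: main
    image: busybox
    command: ["/bin/sh", "-c", "echo main ran"]
EOF
# Once finished, the init container's state is reported as terminated.
kubectl get pod init-demo -o jsonpath='{.status.initContainerStatuses[0].state}'
```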
+May 6 08:14:13.293: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:14:13.376: INFO: namespace: e2e-tests-init-container-zbp7t, resource: bindings, ignored listing per whitelist +May 6 08:14:13.466: INFO: namespace e2e-tests-init-container-zbp7t deletion completed in 6.186726514s + +• [SLOW TEST:10.141 seconds] +[k8s.io] InitContainer [NodeConformance] +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should invoke init containers on a RestartNever pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:14:13.467: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward API volume plugin +May 6 08:14:13.649: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-wk22h" to be "success or failure" +May 6 08:14:13.658: INFO: Pod "downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.928136ms +May 6 08:14:15.662: INFO: Pod "downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013601656s +STEP: Saw pod success +May 6 08:14:15.662: INFO: Pod "downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:14:15.665: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 08:14:15.691: INFO: Waiting for pod downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035 to disappear +May 6 08:14:15.700: INFO: Pod downwardapi-volume-ef163f4e-6fd6-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:14:15.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-wk22h" for this suite. 
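The projected downward API volume used here writes pod metadata into files inside the container. Below is a sketch exposing only the pod name; the pod name, mount path and busybox image are placeholders, not what the suite uses.

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: projected-podname-demo     # placeholder name
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: busybox                 # placeholder image
    command: ["/bin/sh", "-c", "cat /podinfo/podname"]
    volumeMounts:
    - name: podinfo
      mountPath: /podinfo
  volumes:
  - name: podinfo
    projected:
      sources:
      - downwardAPI:
          items:
          - path: podname
            fieldRef:
              fieldPath: metadata.name
EOF
# The log should contain the pod's own name.
kubectl logs projected-podname-demo
```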
+May 6 08:14:21.749: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:14:21.971: INFO: namespace: e2e-tests-projected-wk22h, resource: bindings, ignored listing per whitelist +May 6 08:14:21.980: INFO: namespace e2e-tests-projected-wk22h deletion completed in 6.273652252s + +• [SLOW TEST:8.513 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should provide podname only [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[k8s.io] Container Runtime blackbox test when starting a container that exits + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Container Runtime + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:14:21.981: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename container-runtime +STEP: Waiting for a default service account to be provisioned in namespace +[It] should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Container 'terminate-cmd-rpa': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpa': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpa': should get the expected 'State' +STEP: Container 'terminate-cmd-rpa': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpof': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpof': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpof': should get the expected 'State' +STEP: Container 'terminate-cmd-rpof': should be possible to delete [NodeConformance] +STEP: Container 'terminate-cmd-rpn': should get the expected 'RestartCount' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Phase' +STEP: Container 'terminate-cmd-rpn': should get the expected 'Ready' condition +STEP: Container 'terminate-cmd-rpn': should get the expected 'State' +STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance] +[AfterEach] [k8s.io] Container Runtime + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:14:44.509: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-container-runtime-dsd6k" for this suite. 
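The runtime checks above amount to inspecting a pod's phase, container state and restart count after its container exits (the terminate-cmd-rpa/rpof/rpn containers correspond to different restart policies). A hedged sketch of the Never-restart variant, with placeholder names and image:

```bash
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: exit-status-demo           # placeholder name
spec:
  restartPolicy: Never
  containers:
  - name: terminate-demo
    image: busybox                 # placeholder image
    command: ["/bin/sh", "-c", "exit 0"]
EOF
# A clean exit with restartPolicy Never should show Succeeded and restartCount 0.
kubectl get pod exit-status-demo \
  -o jsonpath='{.status.phase} {.status.containerStatuses[0].restartCount}'
```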
+May 6 08:14:50.525: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:14:50.654: INFO: namespace: e2e-tests-container-runtime-dsd6k, resource: bindings, ignored listing per whitelist +May 6 08:14:50.672: INFO: namespace e2e-tests-container-runtime-dsd6k deletion completed in 6.159298888s + +• [SLOW TEST:28.692 seconds] +[k8s.io] Container Runtime +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + blackbox test + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:37 + when starting a container that exits + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/runtime.go:38 + should run with the expected status [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-apps] ReplicationController + should adopt matching pods on creation [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:14:50.672: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename replication-controller +STEP: Waiting for a default service account to be provisioned in namespace +[It] should adopt matching pods on creation [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Given a Pod with a 'name' label pod-adoption is created +STEP: When a replication controller with a matching selector is created +STEP: Then the orphan pod is adopted +[AfterEach] [sig-apps] ReplicationController + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:14:55.848: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-replication-controller-kzvhw" for this suite. 
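Adoption works because a ReplicationController takes ownership of existing pods that match its selector. The sketch below uses placeholder names and an nginx image; the ownerReferences check at the end shows that the previously bare pod now points at the controller.

```bash
# Create a bare pod carrying the label the controller will select on.
kubectl run pod-adoption-demo --image=nginx --restart=Never --labels=name=pod-adoption-demo
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: ReplicationController
metadata:
  name: pod-adoption-demo          # placeholder name
spec:
  replicas: 1
  selector:
    name: pod-adoption-demo
  template:
    metadata:
      labels:
        name: pod-adoption-demo
    spec:
      containers:
      - name: demo
        image: nginx               # placeholder image
EOF
# Expect "ReplicationController": the orphan pod has been adopted.
kubectl get pod pod-adoption-demo -o jsonpath='{.metadata.ownerReferences[0].kind}'
```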
+May 6 08:15:17.862: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:15:17.935: INFO: namespace: e2e-tests-replication-controller-kzvhw, resource: bindings, ignored listing per whitelist +May 6 08:15:18.000: INFO: namespace e2e-tests-replication-controller-kzvhw deletion completed in 22.149283376s + +• [SLOW TEST:27.329 seconds] +[sig-apps] ReplicationController +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should adopt matching pods on creation [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSS +------------------------------ +[sig-apps] Deployment + deployment should delete old replica sets [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:15:18.006: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename deployment +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65 +[It] deployment should delete old replica sets [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 08:15:18.150: INFO: Pod name cleanup-pod: Found 1 pods out of 1 +STEP: ensuring each pod is running +May 6 08:15:20.164: INFO: Creating deployment test-cleanup-deployment +STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59 +May 6 08:15:22.234: INFO: Deployment "test-cleanup-deployment": +&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment,GenerateName:,Namespace:e2e-tests-deployment-wfxqh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-wfxqh/deployments/test-cleanup-deployment,UID:16be6ebb-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:14367,Generation:1,CreationTimestamp:2019-05-06 08:15:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 1,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: 
cleanup-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:25%!,(MISSING)MaxSurge:25%!,(MISSING)},},MinReadySeconds:0,RevisionHistoryLimit:*0,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:1,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-06 08:15:20 +0000 UTC 2019-05-06 08:15:20 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-06 08:15:22 +0000 UTC 2019-05-06 08:15:20 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-cleanup-deployment-7dbbfcf846" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},} + +May 6 08:15:22.239: INFO: New ReplicaSet "test-cleanup-deployment-7dbbfcf846" of Deployment "test-cleanup-deployment": +&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-7dbbfcf846,GenerateName:,Namespace:e2e-tests-deployment-wfxqh,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-wfxqh/replicasets/test-cleanup-deployment-7dbbfcf846,UID:16c12f54-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:14358,Generation:1,CreationTimestamp:2019-05-06 08:15:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-cleanup-deployment 16be6ebb-6fd7-11e9-8e1b-fa163ee16beb 0xc00227f3b7 0xc00227f3b8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false 
false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},} +May 6 08:15:22.244: INFO: Pod "test-cleanup-deployment-7dbbfcf846-7b5zj" is available: +&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-cleanup-deployment-7dbbfcf846-7b5zj,GenerateName:test-cleanup-deployment-7dbbfcf846-,Namespace:e2e-tests-deployment-wfxqh,SelfLink:/api/v1/namespaces/e2e-tests-deployment-wfxqh/pods/test-cleanup-deployment-7dbbfcf846-7b5zj,UID:16c3062f-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:14357,Generation:0,CreationTimestamp:2019-05-06 08:15:20 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: cleanup-pod,pod-template-hash: 7dbbfcf846,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-cleanup-deployment-7dbbfcf846 16c12f54-6fd7-11e9-8e1b-fa163ee16beb 0xc00227f9f7 0xc00227f9f8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-qb7hx {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-qb7hx,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] [] [] [] [] {map[] map[]} [{default-token-qb7hx true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:15:20 +0000 UTC } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:15:22 +0000 UTC } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:15:22 +0000 UTC } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:15:20 +0000 UTC }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.126,StartTime:2019-05-06 08:15:20 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-06 08:15:21 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 
docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://1de203688da3b6823fb236e0848fe46343fc960ea45e9fbfd8990f08b0f50b07}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},} +[AfterEach] [sig-apps] Deployment + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:15:22.244: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-deployment-wfxqh" for this suite. +May 6 08:15:28.264: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:15:28.303: INFO: namespace: e2e-tests-deployment-wfxqh, resource: bindings, ignored listing per whitelist +May 6 08:15:28.460: INFO: namespace e2e-tests-deployment-wfxqh deletion completed in 6.210683454s + +• [SLOW TEST:10.456 seconds] +[sig-apps] Deployment +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + deployment should delete old replica sets [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-storage] Projected configMap + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:15:28.464: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating configMap with name projected-configmap-test-volume-map-1bcbd7bb-6fd7-11e9-a235-ba138c0d9035 +STEP: Creating a pod to test consume configMaps +May 6 08:15:28.664: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-nplqm" to be "success or failure" +May 6 08:15:28.673: INFO: Pod "pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.463406ms +May 6 08:15:30.680: INFO: Pod "pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. 
Elapsed: 2.016339361s +STEP: Saw pod success +May 6 08:15:30.680: INFO: Pod "pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:15:30.684: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: +STEP: delete the pod +May 6 08:15:30.719: INFO: Waiting for pod pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035 to disappear +May 6 08:15:30.723: INFO: Pod pod-projected-configmaps-1bcc8881-6fd7-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Projected configMap + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:15:30.723: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-nplqm" for this suite. +May 6 08:15:36.761: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:15:36.788: INFO: namespace: e2e-tests-projected-nplqm, resource: bindings, ignored listing per whitelist +May 6 08:15:36.952: INFO: namespace e2e-tests-projected-nplqm deletion completed in 6.224888204s + +• [SLOW TEST:8.489 seconds] +[sig-storage] Projected configMap +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34 + should be consumable from pods in volume with mappings [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Kubectl replace + should update a single-container pod's image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:15:36.955: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Kubectl replace + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1563 +[It] should update a single-container pod's image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: running the image docker.io/library/nginx:1.14-alpine +May 6 08:15:37.159: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-pod --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --labels=run=e2e-test-nginx-pod --namespace=e2e-tests-kubectl-xmhnv' +May 6 08:15:37.297: INFO: stderr: "" +May 6 08:15:37.297: INFO: stdout: 
"pod/e2e-test-nginx-pod created\n" +STEP: verifying the pod e2e-test-nginx-pod is running +STEP: verifying the pod e2e-test-nginx-pod was created +May 6 08:15:42.348: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pod e2e-test-nginx-pod --namespace=e2e-tests-kubectl-xmhnv -o json' +May 6 08:15:42.488: INFO: stderr: "" +May 6 08:15:42.488: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2019-05-06T08:15:37Z\",\n \"labels\": {\n \"run\": \"e2e-test-nginx-pod\"\n },\n \"name\": \"e2e-test-nginx-pod\",\n \"namespace\": \"e2e-tests-kubectl-xmhnv\",\n \"resourceVersion\": \"14482\",\n \"selfLink\": \"/api/v1/namespaces/e2e-tests-kubectl-xmhnv/pods/e2e-test-nginx-pod\",\n \"uid\": \"20f13a15-6fd7-11e9-8e1b-fa163ee16beb\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"docker.io/library/nginx:1.14-alpine\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"e2e-test-nginx-pod\",\n \"resources\": {},\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"default-token-qthx5\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"ClusterFirst\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"kubernetes-cluster-2696-minion-0\",\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 30,\n \"volumes\": [\n {\n \"name\": \"default-token-qthx5\",\n \"secret\": {\n \"defaultMode\": 420,\n \"secretName\": \"default-token-qthx5\"\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2019-05-06T08:15:37Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2019-05-06T08:15:39Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2019-05-06T08:15:39Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2019-05-06T08:15:37Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"docker://aa1f64ae6cca457081193559e40c4eade4f463ab8adf8d00c0922d4af8cb4a3f\",\n \"image\": \"nginx:1.14-alpine\",\n \"imageID\": \"docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7\",\n \"lastState\": {},\n \"name\": \"e2e-test-nginx-pod\",\n \"ready\": true,\n \"restartCount\": 0,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2019-05-06T08:15:38Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.0.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.100.112.66\",\n \"qosClass\": \"BestEffort\",\n \"startTime\": \"2019-05-06T08:15:37Z\"\n }\n}\n" +STEP: replace the image in the pod +May 6 08:15:42.489: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 replace -f - --namespace=e2e-tests-kubectl-xmhnv' +May 6 08:15:42.841: INFO: stderr: "" +May 6 08:15:42.841: INFO: stdout: "pod/e2e-test-nginx-pod replaced\n" +STEP: verifying the pod e2e-test-nginx-pod has the right image docker.io/library/busybox:1.29 +[AfterEach] [k8s.io] Kubectl replace + 
/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1568 +May 6 08:15:42.845: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-xmhnv' +May 6 08:15:44.683: INFO: stderr: "" +May 6 08:15:44.683: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:15:44.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-xmhnv" for this suite. +May 6 08:15:50.709: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:15:50.894: INFO: namespace: e2e-tests-kubectl-xmhnv, resource: bindings, ignored listing per whitelist +May 6 08:15:50.958: INFO: namespace e2e-tests-kubectl-xmhnv deletion completed in 6.26719615s + +• [SLOW TEST:14.004 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Kubectl replace + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should update a single-container pod's image [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSS +------------------------------ +[k8s.io] Pods + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:15:50.962: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename pods +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132 +[It] should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating the pod +STEP: submitting the pod to kubernetes +STEP: verifying the pod is in kubernetes +STEP: updating the pod +May 6 08:15:53.623: INFO: Successfully updated pod "pod-update-29295b00-6fd7-11e9-a235-ba138c0d9035" +STEP: verifying the updated pod is in kubernetes +May 6 08:15:53.637: INFO: Pod update OK +[AfterEach] [k8s.io] Pods + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:15:53.637: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-pods-5w8ls" for this suite. 
+May 6 08:16:15.692: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:16:15.712: INFO: namespace: e2e-tests-pods-5w8ls, resource: bindings, ignored listing per whitelist +May 6 08:16:15.856: INFO: namespace e2e-tests-pods-5w8ls deletion completed in 22.211931592s + +• [SLOW TEST:24.895 seconds] +[k8s.io] Pods +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should be updated [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSS +------------------------------ +[k8s.io] [sig-node] PreStop + should call prestop when killing a pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:16:15.857: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename prestop +STEP: Waiting for a default service account to be provisioned in namespace +[It] should call prestop when killing a pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating server pod server in namespace e2e-tests-prestop-58r6w +STEP: Waiting for pods to come up. +STEP: Creating tester pod tester in namespace e2e-tests-prestop-58r6w +STEP: Deleting pre-stop pod +May 6 08:16:31.131: INFO: Saw: { + "Hostname": "server", + "Sent": null, + "Received": { + "prestop": 1 + }, + "Errors": null, + "Log": [ + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up.", + "default/nettest has 0 endpoints ([]), which is less than 8 as expected. Waiting for all endpoints to come up." + ], + "StillContactingPeers": true +} +STEP: Deleting the server pod +[AfterEach] [k8s.io] [sig-node] PreStop + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:16:31.141: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-prestop-58r6w" for this suite. 
+May 6 08:17:09.162: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:17:09.191: INFO: namespace: e2e-tests-prestop-58r6w, resource: bindings, ignored listing per whitelist +May 6 08:17:09.269: INFO: namespace e2e-tests-prestop-58r6w deletion completed in 38.119674061s + +• [SLOW TEST:53.412 seconds] +[k8s.io] [sig-node] PreStop +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should call prestop when killing a pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SS +------------------------------ +[sig-storage] Downward API volume + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:17:09.270: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating the pod +May 6 08:17:11.927: INFO: Successfully updated pod "labelsupdate57d54581-6fd7-11e9-a235-ba138c0d9035" +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:17:15.963: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-5srqh" for this suite. 
+May 6 08:17:38.004: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:17:38.070: INFO: namespace: e2e-tests-downward-api-5srqh, resource: bindings, ignored listing per whitelist +May 6 08:17:38.196: INFO: namespace e2e-tests-downward-api-5srqh deletion completed in 22.224870449s + +• [SLOW TEST:28.926 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should update labels on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSS +------------------------------ +[sig-apps] Daemon set [Serial] + should run and stop complex daemon [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:17:38.197: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename daemonsets +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102 +[It] should run and stop complex daemon [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +May 6 08:17:38.352: INFO: Creating daemon "daemon-set" with a node selector +STEP: Initially, daemon pods should not be running on any nodes. +May 6 08:17:38.363: INFO: Number of nodes with available pods: 0 +May 6 08:17:38.364: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Change node label to blue, check that daemon pod is launched. 
+May 6 08:17:38.396: INFO: Number of nodes with available pods: 0 +May 6 08:17:38.396: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:39.404: INFO: Number of nodes with available pods: 0 +May 6 08:17:39.405: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:40.405: INFO: Number of nodes with available pods: 1 +May 6 08:17:40.405: INFO: Number of running nodes: 1, number of available pods: 1 +STEP: Update the node label to green, and wait for daemons to be unscheduled +May 6 08:17:40.454: INFO: Number of nodes with available pods: 1 +May 6 08:17:40.455: INFO: Number of running nodes: 0, number of available pods: 1 +May 6 08:17:41.459: INFO: Number of nodes with available pods: 0 +May 6 08:17:41.459: INFO: Number of running nodes: 0, number of available pods: 0 +STEP: Update DaemonSet node selector to green, and change its update strategy to RollingUpdate +May 6 08:17:41.483: INFO: Number of nodes with available pods: 0 +May 6 08:17:41.483: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:42.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:42.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:43.488: INFO: Number of nodes with available pods: 0 +May 6 08:17:43.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:44.488: INFO: Number of nodes with available pods: 0 +May 6 08:17:44.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:45.488: INFO: Number of nodes with available pods: 0 +May 6 08:17:45.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:46.490: INFO: Number of nodes with available pods: 0 +May 6 08:17:46.490: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:47.488: INFO: Number of nodes with available pods: 0 +May 6 08:17:47.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:48.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:48.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:49.490: INFO: Number of nodes with available pods: 0 +May 6 08:17:49.490: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:50.499: INFO: Number of nodes with available pods: 0 +May 6 08:17:50.499: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:51.490: INFO: Number of nodes with available pods: 0 +May 6 08:17:51.490: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:52.499: INFO: Number of nodes with available pods: 0 +May 6 08:17:52.499: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:53.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:53.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:54.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:54.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:55.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:55.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:56.495: INFO: Number of nodes with available pods: 0 +May 6 08:17:56.495: INFO: 
Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:57.490: INFO: Number of nodes with available pods: 0 +May 6 08:17:57.490: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:58.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:58.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:17:59.489: INFO: Number of nodes with available pods: 0 +May 6 08:17:59.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:00.493: INFO: Number of nodes with available pods: 0 +May 6 08:18:00.493: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:01.489: INFO: Number of nodes with available pods: 0 +May 6 08:18:01.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:02.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:02.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:03.489: INFO: Number of nodes with available pods: 0 +May 6 08:18:03.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:04.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:04.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:05.490: INFO: Number of nodes with available pods: 0 +May 6 08:18:05.492: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:06.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:06.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:07.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:07.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:08.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:08.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:09.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:09.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:10.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:10.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:11.496: INFO: Number of nodes with available pods: 0 +May 6 08:18:11.497: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:12.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:12.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:13.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:13.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:14.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:14.489: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:15.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:15.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:16.488: INFO: Number of nodes with available pods: 0 +May 6 08:18:16.488: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod +May 6 08:18:17.490: INFO: Number of nodes with available pods: 1 +May 6 08:18:17.490: INFO: Number of running nodes: 1, number of available 
pods: 1 +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68 +STEP: Deleting DaemonSet "daemon-set" +STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-hwrzs, will wait for the garbage collector to delete the pods +May 6 08:18:17.563: INFO: Deleting DaemonSet.extensions daemon-set took: 9.698235ms +May 6 08:18:17.663: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.303974ms +May 6 08:19:00.171: INFO: Number of nodes with available pods: 0 +May 6 08:19:00.172: INFO: Number of running nodes: 0, number of available pods: 0 +May 6 08:19:00.174: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-hwrzs/daemonsets","resourceVersion":"15089"},"items":null} + +May 6 08:19:00.177: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-hwrzs/pods","resourceVersion":"15089"},"items":null} + +[AfterEach] [sig-apps] Daemon set [Serial] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:19:00.187: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-daemonsets-hwrzs" for this suite. +May 6 08:19:06.210: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:19:06.238: INFO: namespace: e2e-tests-daemonsets-hwrzs, resource: bindings, ignored listing per whitelist +May 6 08:19:06.330: INFO: namespace e2e-tests-daemonsets-hwrzs deletion completed in 6.135385903s + +• [SLOW TEST:88.133 seconds] +[sig-apps] Daemon set [Serial] +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22 + should run and stop complex daemon [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +S +------------------------------ +[sig-storage] Downward API volume + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:19:06.330: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename downward-api +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39 +[It] should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test downward 
API volume plugin +May 6 08:19:06.479: INFO: Waiting up to 5m0s for pod "downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-ncn48" to be "success or failure" +May 6 08:19:06.486: INFO: Pod "downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.296189ms +May 6 08:19:08.493: INFO: Pod "downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.013841973s +STEP: Saw pod success +May 6 08:19:08.493: INFO: Pod "downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:19:08.497: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035 container client-container: +STEP: delete the pod +May 6 08:19:08.569: INFO: Waiting for pod downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035 to disappear +May 6 08:19:08.571: INFO: Pod downwardapi-volume-9d9ca249-6fd7-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] Downward API volume + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:19:08.571: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-downward-api-ncn48" for this suite. +May 6 08:19:14.591: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:19:14.682: INFO: namespace: e2e-tests-downward-api-ncn48, resource: bindings, ignored listing per whitelist +May 6 08:19:14.758: INFO: namespace e2e-tests-downward-api-ncn48 deletion completed in 6.183341064s + +• [SLOW TEST:8.429 seconds] +[sig-storage] Downward API volume +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34 + should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSS +------------------------------ +[sig-cli] Kubectl client [k8s.io] Update Demo + should create and stop a replication controller [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:19:14.760: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename kubectl +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243 +[BeforeEach] [k8s.io] Update Demo + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295 +[It] should create and stop a replication controller [Conformance] + 
/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: creating a replication controller +May 6 08:19:14.932: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:15.968: INFO: stderr: "" +May 6 08:19:15.968: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n" +STEP: waiting for all containers in name=update-demo pods to come up. +May 6 08:19:15.968: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:16.136: INFO: stderr: "" +May 6 08:19:16.136: INFO: stdout: "update-demo-nautilus-jsgkq update-demo-nautilus-xt6ql " +May 6 08:19:16.136: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-jsgkq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:16.270: INFO: stderr: "" +May 6 08:19:16.270: INFO: stdout: "" +May 6 08:19:16.270: INFO: update-demo-nautilus-jsgkq is created but not running +May 6 08:19:21.271: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:21.383: INFO: stderr: "" +May 6 08:19:21.383: INFO: stdout: "update-demo-nautilus-jsgkq update-demo-nautilus-xt6ql " +May 6 08:19:21.383: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-jsgkq -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:21.489: INFO: stderr: "" +May 6 08:19:21.489: INFO: stdout: "true" +May 6 08:19:21.489: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-jsgkq -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:21.622: INFO: stderr: "" +May 6 08:19:21.622: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:19:21.622: INFO: validating pod update-demo-nautilus-jsgkq +May 6 08:19:21.628: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:19:21.628: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:19:21.628: INFO: update-demo-nautilus-jsgkq is verified up and running +May 6 08:19:21.628: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-xt6ql -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . 
"state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:21.776: INFO: stderr: "" +May 6 08:19:21.776: INFO: stdout: "true" +May 6 08:19:21.776: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-xt6ql -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:21.887: INFO: stderr: "" +May 6 08:19:21.887: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0" +May 6 08:19:21.887: INFO: validating pod update-demo-nautilus-xt6ql +May 6 08:19:21.894: INFO: got data: { + "image": "nautilus.jpg" +} + +May 6 08:19:21.894: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg . +May 6 08:19:21.894: INFO: update-demo-nautilus-xt6ql is verified up and running +STEP: using delete to clean up resources +May 6 08:19:21.894: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:22.020: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n" +May 6 08:19:22.020: INFO: stdout: "replicationcontroller \"update-demo-nautilus\" force deleted\n" +May 6 08:19:22.020: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get rc,svc -l name=update-demo --no-headers --namespace=e2e-tests-kubectl-wpxx4' +May 6 08:19:22.258: INFO: stderr: "No resources found.\n" +May 6 08:19:22.258: INFO: stdout: "" +May 6 08:19:22.258: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -l name=update-demo --namespace=e2e-tests-kubectl-wpxx4 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}' +May 6 08:19:22.497: INFO: stderr: "" +May 6 08:19:22.497: INFO: stdout: "" +[AfterEach] [sig-cli] Kubectl client + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:19:22.497: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-kubectl-wpxx4" for this suite. 
+May 6 08:19:28.515: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:19:28.653: INFO: namespace: e2e-tests-kubectl-wpxx4, resource: bindings, ignored listing per whitelist +May 6 08:19:28.659: INFO: namespace e2e-tests-kubectl-wpxx4 deletion completed in 6.157661224s + +• [SLOW TEST:13.900 seconds] +[sig-cli] Kubectl client +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22 + [k8s.io] Update Demo + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694 + should create and stop a replication controller [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +[sig-storage] EmptyDir volumes + should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:19:28.663: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename emptydir +STEP: Waiting for a default service account to be provisioned in namespace +[It] should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating a pod to test emptydir 0644 on node default medium +May 6 08:19:28.835: INFO: Waiting up to 5m0s for pod "pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-trmch" to be "success or failure" +May 6 08:19:28.845: INFO: Pod "pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.504943ms +May 6 08:19:30.849: INFO: Pod "pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013768326s +May 6 08:19:32.867: INFO: Pod "pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.031876844s +STEP: Saw pod success +May 6 08:19:32.867: INFO: Pod "pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035" satisfied condition "success or failure" +May 6 08:19:32.876: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035 container test-container: +STEP: delete the pod +May 6 08:19:32.905: INFO: Waiting for pod pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035 to disappear +May 6 08:19:32.911: INFO: Pod pod-aaf24a2a-6fd7-11e9-a235-ba138c0d9035 no longer exists +[AfterEach] [sig-storage] EmptyDir volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:19:32.911: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-emptydir-trmch" for this suite. 
+May 6 08:19:38.927: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:19:39.004: INFO: namespace: e2e-tests-emptydir-trmch, resource: bindings, ignored listing per whitelist +May 6 08:19:39.105: INFO: namespace e2e-tests-emptydir-trmch deletion completed in 6.190353013s + +• [SLOW TEST:10.443 seconds] +[sig-storage] EmptyDir volumes +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40 + should support (root,0644,default) [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSSSSSSSSSSSSSSSSSSSSSS +------------------------------ +[sig-storage] Projected downwardAPI + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:19:39.106: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating the pod +May 6 08:19:41.891: INFO: Successfully updated pod "annotationupdateb131d23a-6fd7-11e9-a235-ba138c0d9035" +[AfterEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:19:43.927: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-projected-qs4pq" for this suite. 
+May 6 08:20:05.944: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:20:05.986: INFO: namespace: e2e-tests-projected-qs4pq, resource: bindings, ignored listing per whitelist +May 6 08:20:06.148: INFO: namespace e2e-tests-projected-qs4pq deletion completed in 22.218111765s + +• [SLOW TEST:27.042 seconds] +[sig-storage] Projected downwardAPI +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33 + should update annotations on modification [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Subpath Atomic writer volumes + should support subpaths with downward pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:20:06.149: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename subpath +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38 +STEP: Setting up data +[It] should support subpaths with downward pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +STEP: Creating pod pod-subpath-test-downwardapi-9kcb +STEP: Creating a pod to test atomic-volume-subpath +May 6 08:20:06.312: INFO: Waiting up to 5m0s for pod "pod-subpath-test-downwardapi-9kcb" in namespace "e2e-tests-subpath-qfzvr" to be "success or failure" +May 6 08:20:06.321: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Pending", Reason="", readiness=false. Elapsed: 9.294483ms +May 6 08:20:08.327: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015100271s +May 6 08:20:10.334: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 4.022398921s +May 6 08:20:12.339: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 6.026606528s +May 6 08:20:14.343: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 8.030793579s +May 6 08:20:16.347: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 10.035289613s +May 6 08:20:18.351: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 12.039123647s +May 6 08:20:20.356: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 14.044043978s +May 6 08:20:22.360: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. 
Elapsed: 16.048175161s +May 6 08:20:24.365: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 18.052840092s +May 6 08:20:26.370: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 20.058077034s +May 6 08:20:28.375: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Running", Reason="", readiness=false. Elapsed: 22.062967372s +May 6 08:20:30.380: INFO: Pod "pod-subpath-test-downwardapi-9kcb": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.067691513s +STEP: Saw pod success +May 6 08:20:30.380: INFO: Pod "pod-subpath-test-downwardapi-9kcb" satisfied condition "success or failure" +May 6 08:20:30.384: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-subpath-test-downwardapi-9kcb container test-container-subpath-downwardapi-9kcb: +STEP: delete the pod +May 6 08:20:30.416: INFO: Waiting for pod pod-subpath-test-downwardapi-9kcb to disappear +May 6 08:20:30.422: INFO: Pod pod-subpath-test-downwardapi-9kcb no longer exists +STEP: Deleting pod pod-subpath-test-downwardapi-9kcb +May 6 08:20:30.422: INFO: Deleting pod "pod-subpath-test-downwardapi-9kcb" in namespace "e2e-tests-subpath-qfzvr" +[AfterEach] [sig-storage] Subpath + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154 +May 6 08:20:30.428: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready +STEP: Destroying namespace "e2e-tests-subpath-qfzvr" for this suite. +May 6 08:20:36.470: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered +May 6 08:20:36.633: INFO: namespace: e2e-tests-subpath-qfzvr, resource: bindings, ignored listing per whitelist +May 6 08:20:36.656: INFO: namespace e2e-tests-subpath-qfzvr deletion completed in 6.219330711s + +• [SLOW TEST:30.508 seconds] +[sig-storage] Subpath +/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22 + Atomic writer volumes + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34 + should support subpaths with downward pod [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +------------------------------ +SSS +------------------------------ +[sig-storage] Projected downwardAPI + should provide container's memory limit [NodeConformance] [Conformance] + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699 +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153 +STEP: Creating a kubernetes client +May 6 08:20:36.659: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706 +STEP: Building a namespace api object, basename projected +STEP: Waiting for a default service account to be provisioned in namespace +[BeforeEach] [sig-storage] Projected downwardAPI + /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39 +[It] should provide container's memory limit 
[NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:20:36.838: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-z8x92" to be "success or failure"
+May  6 08:20:36.852: INFO: Pod "downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 13.007036ms
+May  6 08:20:38.863: INFO: Pod "downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023929081s
+May  6 08:20:40.875: INFO: Pod "downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.036500457s
+STEP: Saw pod success
+May  6 08:20:40.875: INFO: Pod "downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:20:40.879: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:20:40.905: INFO: Waiting for pod downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035 to disappear
+May  6 08:20:40.922: INFO: Pod downwardapi-volume-d37c9ebc-6fd7-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:20:40.922: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-z8x92" for this suite.
+May  6 08:20:46.960: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:20:47.065: INFO: namespace: e2e-tests-projected-z8x92, resource: bindings, ignored listing per whitelist
+May  6 08:20:47.155: INFO: namespace e2e-tests-projected-z8x92 deletion completed in 6.21925285s
+
+• [SLOW TEST:10.496 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] Downward API volume 
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:20:47.155: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:20:47.343: INFO: Waiting up to 5m0s for pod "downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-lqt7b" to be "success or failure"
+May  6 08:20:47.384: INFO: Pod "downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 40.228882ms
+May  6 08:20:49.401: INFO: Pod "downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.057275424s
+STEP: Saw pod success
+May  6 08:20:49.401: INFO: Pod "downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:20:49.405: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:20:49.447: INFO: Waiting for pod downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035 to disappear
+May  6 08:20:49.455: INFO: Pod downwardapi-volume-d9bc0eca-6fd7-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:20:49.455: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-lqt7b" for this suite.
+May  6 08:20:55.490: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:20:55.588: INFO: namespace: e2e-tests-downward-api-lqt7b, resource: bindings, ignored listing per whitelist
+May  6 08:20:55.674: INFO: namespace e2e-tests-downward-api-lqt7b deletion completed in 6.213057686s
+
+• [SLOW TEST:8.519 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node using proxy subresource [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] version v1
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:20:55.676: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node using proxy subresource [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:20:55.844: INFO: (0) /api/v1/nodes/kubernetes-cluster-2696-minion-0/proxy/logs/:
+anaconda/
+audit/
+btmp
+[node /logs/ directory listing (anaconda/, audit/, btmp) repeated identically for each of the 20 proxied requests; the HTML markup of the responses, the remainder of this proxy test's output, and the header of the following kubectl rolling-update test were not preserved in this capture]
+>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1358
+[It] should support rolling-update to same image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May  6 08:21:02.392: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-rc --image=docker.io/library/nginx:1.14-alpine --generator=run/v1 --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:02.536: INFO: stderr: "kubectl run --generator=run/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May  6 08:21:02.536: INFO: stdout: "replicationcontroller/e2e-test-nginx-rc created\n"
+STEP: verifying the rc e2e-test-nginx-rc was created
+STEP: rolling-update to same image controller
+May  6 08:21:02.549: INFO: scanned /root for discovery docs: 
+May  6 08:21:02.549: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 rolling-update e2e-test-nginx-rc --update-period=1s --image=docker.io/library/nginx:1.14-alpine --image-pull-policy=IfNotPresent --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:18.442: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+May  6 08:21:18.442: INFO: stdout: "Created e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14\nScaling up e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14 from 0 to 1, scaling down e2e-test-nginx-rc from 1 to 0 (keep 1 pods available, don't exceed 2 pods)\nScaling e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14 up to 1\nScaling e2e-test-nginx-rc down to 0\nUpdate succeeded. Deleting old controller: e2e-test-nginx-rc\nRenaming e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14 to e2e-test-nginx-rc\nreplicationcontroller/e2e-test-nginx-rc rolling updated\n"
+STEP: waiting for all containers in run=e2e-test-nginx-rc pods to come up.
+May  6 08:21:18.442: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:18.552: INFO: stderr: ""
+May  6 08:21:18.552: INFO: stdout: "e2e-test-nginx-rc-4k752 e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14-jlpgn "
+STEP: Replicas for run=e2e-test-nginx-rc: expected=1 actual=2
+May  6 08:21:23.552: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l run=e2e-test-nginx-rc --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:23.688: INFO: stderr: ""
+May  6 08:21:23.688: INFO: stdout: "e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14-jlpgn "
+May  6 08:21:23.688: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14-jlpgn -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "e2e-test-nginx-rc") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:23.802: INFO: stderr: ""
+May  6 08:21:23.802: INFO: stdout: "true"
+May  6 08:21:23.802: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14-jlpgn -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "e2e-test-nginx-rc"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:23.919: INFO: stderr: ""
+May  6 08:21:23.919: INFO: stdout: "docker.io/library/nginx:1.14-alpine"
+May  6 08:21:23.919: INFO: e2e-test-nginx-rc-8ad8a7caaaec03ca3e54252cad01ac14-jlpgn is verified up and running
+[AfterEach] [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1364
+May  6 08:21:23.919: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete rc e2e-test-nginx-rc --namespace=e2e-tests-kubectl-4s2zt'
+May  6 08:21:24.078: INFO: stderr: ""
+May  6 08:21:24.078: INFO: stdout: "replicationcontroller \"e2e-test-nginx-rc\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:21:24.078: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-4s2zt" for this suite.
+May  6 08:21:46.117: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:21:46.165: INFO: namespace: e2e-tests-kubectl-4s2zt, resource: bindings, ignored listing per whitelist
+May  6 08:21:46.297: INFO: namespace e2e-tests-kubectl-4s2zt deletion completed in 22.210187702s
+
+• [SLOW TEST:44.067 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl rolling-update
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support rolling-update to same image  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] Watchers 
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:21:46.297: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename watch
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating a watch on configmaps
+STEP: creating a new configmap
+STEP: modifying the configmap once
+STEP: closing the watch once it receives two notifications
+May  6 08:21:46.454: INFO: Got : ADDED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-kvpxm,SelfLink:/api/v1/namespaces/e2e-tests-watch-kvpxm/configmaps/e2e-watch-test-watch-closed,UID:fcface57-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:15827,Generation:0,CreationTimestamp:2019-05-06 08:21:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{},BinaryData:map[string][]byte{},}
+May  6 08:21:46.455: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-kvpxm,SelfLink:/api/v1/namespaces/e2e-tests-watch-kvpxm/configmaps/e2e-watch-test-watch-closed,UID:fcface57-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:15828,Generation:0,CreationTimestamp:2019-05-06 08:21:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 1,},BinaryData:map[string][]byte{},}
+STEP: modifying the configmap a second time, while the watch is closed
+STEP: creating a new watch on configmaps from the last resource version observed by the first watch
+STEP: deleting the configmap
+STEP: Expecting to observe notifications for all changes to the configmap since the first watch closed
+May  6 08:21:46.535: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-kvpxm,SelfLink:/api/v1/namespaces/e2e-tests-watch-kvpxm/configmaps/e2e-watch-test-watch-closed,UID:fcface57-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:15829,Generation:0,CreationTimestamp:2019-05-06 08:21:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+May  6 08:21:46.535: INFO: Got : DELETED &ConfigMap{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:e2e-watch-test-watch-closed,GenerateName:,Namespace:e2e-tests-watch-kvpxm,SelfLink:/api/v1/namespaces/e2e-tests-watch-kvpxm/configmaps/e2e-watch-test-watch-closed,UID:fcface57-6fd7-11e9-8e1b-fa163ee16beb,ResourceVersion:15830,Generation:0,CreationTimestamp:2019-05-06 08:21:46 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{watch-this-configmap: watch-closed-and-restarted,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
+[AfterEach] [sig-api-machinery] Watchers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:21:46.535: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-watch-kvpxm" for this suite.
+May  6 08:21:52.568: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:21:52.736: INFO: namespace: e2e-tests-watch-kvpxm, resource: bindings, ignored listing per whitelist
+May  6 08:21:52.745: INFO: namespace e2e-tests-watch-kvpxm deletion completed in 6.20019701s
+
+• [SLOW TEST:6.448 seconds]
+[sig-api-machinery] Watchers
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:21:52.746: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test substitution in container's command
+May  6 08:21:52.912: INFO: Waiting up to 5m0s for pod "var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035" in namespace "e2e-tests-var-expansion-nwzhx" to be "success or failure"
+May  6 08:21:52.931: INFO: Pod "var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 18.626672ms
+May  6 08:21:54.935: INFO: Pod "var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023047455s
+STEP: Saw pod success
+May  6 08:21:54.935: INFO: Pod "var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:21:54.939: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035 container dapi-container: 
+STEP: delete the pod
+May  6 08:21:54.980: INFO: Waiting for pod var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035 to disappear
+May  6 08:21:54.988: INFO: Pod var-expansion-00d30451-6fd8-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:21:54.988: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-nwzhx" for this suite.
+May  6 08:22:01.017: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:22:01.159: INFO: namespace: e2e-tests-var-expansion-nwzhx, resource: bindings, ignored listing per whitelist
+May  6 08:22:01.168: INFO: namespace e2e-tests-var-expansion-nwzhx deletion completed in 6.167495999s
+
+• [SLOW TEST:8.423 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow substituting values in a container's command [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Pods 
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:22:01.169: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:22:01.281: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:22:05.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-6bvw8" for this suite.
+May  6 08:22:43.385: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:22:43.515: INFO: namespace: e2e-tests-pods-6bvw8, resource: bindings, ignored listing per whitelist
+May  6 08:22:43.563: INFO: namespace e2e-tests-pods-6bvw8 deletion completed in 38.202023909s
+
+• [SLOW TEST:42.394 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[k8s.io] Probing container 
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:22:43.564: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-cmwpv
+May  6 08:22:49.730: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-cmwpv
+STEP: checking the pod's current state and verifying that restartCount is present
+May  6 08:22:49.734: INFO: Initial restart count of pod liveness-http is 0
+May  6 08:23:07.786: INFO: Restart count of pod e2e-tests-container-probe-cmwpv/liveness-http is now 1 (18.051613071s elapsed)
+May  6 08:23:27.852: INFO: Restart count of pod e2e-tests-container-probe-cmwpv/liveness-http is now 2 (38.117247298s elapsed)
+May  6 08:23:45.909: INFO: Restart count of pod e2e-tests-container-probe-cmwpv/liveness-http is now 3 (56.174744214s elapsed)
+May  6 08:24:05.971: INFO: Restart count of pod e2e-tests-container-probe-cmwpv/liveness-http is now 4 (1m16.236443743s elapsed)
+May  6 08:25:10.135: INFO: Restart count of pod e2e-tests-container-probe-cmwpv/liveness-http is now 5 (2m20.400520536s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:25:10.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-cmwpv" for this suite.
+May  6 08:25:16.194: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:25:16.254: INFO: namespace: e2e-tests-container-probe-cmwpv, resource: bindings, ignored listing per whitelist
+May  6 08:25:16.347: INFO: namespace e2e-tests-container-probe-cmwpv deletion completed in 6.179010521s
+
+• [SLOW TEST:152.783 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should have monotonically increasing restart count [Slow][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Proxy server 
+  should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:25:16.347: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should support --unix-socket=/path  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Starting the proxy
+May  6 08:25:16.480: INFO: Asynchronously running '/usr/local/bin/kubectl kubectl --kubeconfig=/tmp/kubeconfig-307990706 proxy --unix-socket=/tmp/kubectl-proxy-unix357423721/test'
+STEP: retrieving proxy /api/ output
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:25:16.562: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-4lmns" for this suite.
+May  6 08:25:22.581: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:25:22.682: INFO: namespace: e2e-tests-kubectl-4lmns, resource: bindings, ignored listing per whitelist
+May  6 08:25:22.726: INFO: namespace e2e-tests-kubectl-4lmns deletion completed in 6.15799781s
+
+• [SLOW TEST:6.379 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Proxy server
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should support --unix-socket=/path  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:25:22.730: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the pods
+STEP: Gathering metrics
+W0506 08:26:02.882688      14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May  6 08:26:02.882: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:26:02.882: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-m8jgh" for this suite.
+May  6 08:26:08.928: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:26:08.986: INFO: namespace: e2e-tests-gc-m8jgh, resource: bindings, ignored listing per whitelist
+May  6 08:26:09.041: INFO: namespace e2e-tests-gc-m8jgh deletion completed in 6.145195427s
+
+• [SLOW TEST:46.312 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan pods created by rc if delete options say so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:26:09.042: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on node default medium
+May  6 08:26:09.160: INFO: Waiting up to 5m0s for pod "pod-99904407-6fd8-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-5wqnr" to be "success or failure"
+May  6 08:26:09.178: INFO: Pod "pod-99904407-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 18.015195ms
+May  6 08:26:11.223: INFO: Pod "pod-99904407-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.06339524s
+May  6 08:26:13.227: INFO: Pod "pod-99904407-6fd8-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.067402297s
+STEP: Saw pod success
+May  6 08:26:13.227: INFO: Pod "pod-99904407-6fd8-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:26:13.230: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-99904407-6fd8-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:26:13.253: INFO: Waiting for pod pod-99904407-6fd8-11e9-a235-ba138c0d9035 to disappear
+May  6 08:26:13.256: INFO: Pod pod-99904407-6fd8-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:26:13.256: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-5wqnr" for this suite.
+May  6 08:26:19.271: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:26:19.309: INFO: namespace: e2e-tests-emptydir-5wqnr, resource: bindings, ignored listing per whitelist
+May  6 08:26:19.399: INFO: namespace e2e-tests-emptydir-5wqnr deletion completed in 6.138664666s
+
+• [SLOW TEST:10.357 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:26:19.400: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:26:19.529: INFO: Pod name rollover-pod: Found 0 pods out of 1
+May  6 08:26:24.539: INFO: Pod name rollover-pod: Found 1 pods out of 1
+STEP: ensuring each pod is running
+May  6 08:26:24.540: INFO: Waiting for pods owned by replica set "test-rollover-controller" to become ready
+May  6 08:26:26.545: INFO: Creating deployment "test-rollover-deployment"
+May  6 08:26:26.559: INFO: Make sure deployment "test-rollover-deployment" performs scaling operations
+May  6 08:26:28.581: INFO: Check revision of new replica set for deployment "test-rollover-deployment"
+May  6 08:26:28.589: INFO: Ensure that both replica sets have 1 created replica
+May  6 08:26:28.596: INFO: Rollover old replica sets for deployment "test-rollover-deployment" with new image update
+May  6 08:26:28.606: INFO: Updating deployment test-rollover-deployment
+May  6 08:26:28.606: INFO: Wait deployment "test-rollover-deployment" to be observed by the deployment controller
+May  6 08:26:30.614: INFO: Wait for revision update of deployment "test-rollover-deployment" to 2
+May  6 08:26:30.621: INFO: Make sure deployment "test-rollover-deployment" is complete
+May  6 08:26:30.627: INFO: all replica sets need to contain the pod-template-hash label
+May  6 08:26:30.627: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727990, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May  6 08:26:32.639: INFO: all replica sets need to contain the pod-template-hash label
+May  6 08:26:32.639: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727990, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May  6 08:26:34.637: INFO: all replica sets need to contain the pod-template-hash label
+May  6 08:26:34.637: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727990, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May  6 08:26:36.636: INFO: all replica sets need to contain the pod-template-hash label
+May  6 08:26:36.637: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727990, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May  6 08:26:38.635: INFO: all replica sets need to contain the pod-template-hash label
+May  6 08:26:38.636: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:2, Replicas:2, UpdatedReplicas:1, ReadyReplicas:2, AvailableReplicas:1, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"MinimumReplicasAvailable", Message:"Deployment has minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727990, loc:(*time.Location)(0x7b47ba0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692727986, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"test-rollover-deployment-6b7f9d6597\" is progressing."}}, CollisionCount:(*int32)(nil)}
+May  6 08:26:40.636: INFO: 
+May  6 08:26:40.636: INFO: Ensure that both old replica sets have no replicas
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May  6 08:26:40.646: INFO: Deployment "test-rollover-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment,GenerateName:,Namespace:e2e-tests-deployment-9hnxj,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9hnxj/deployments/test-rollover-deployment,UID:a3eefbab-6fd8-11e9-8e1b-fa163ee16beb,ResourceVersion:16936,Generation:2,CreationTimestamp:2019-05-06 08:26:26 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:0,MaxSurge:1,},},MinReadySeconds:10,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:1,UnavailableReplicas:0,Conditions:[{Available True 2019-05-06 08:26:26 +0000 UTC 2019-05-06 08:26:26 +0000 UTC MinimumReplicasAvailable Deployment has minimum availability.} {Progressing True 2019-05-06 08:26:40 +0000 UTC 2019-05-06 08:26:26 +0000 UTC NewReplicaSetAvailable ReplicaSet "test-rollover-deployment-6b7f9d6597" has successfully progressed.}],ReadyReplicas:1,CollisionCount:nil,},}
+
+May  6 08:26:40.650: INFO: New ReplicaSet "test-rollover-deployment-6b7f9d6597" of Deployment "test-rollover-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6b7f9d6597,GenerateName:,Namespace:e2e-tests-deployment-9hnxj,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9hnxj/replicasets/test-rollover-deployment-6b7f9d6597,UID:a529bfd2-6fd8-11e9-8e1b-fa163ee16beb,ResourceVersion:16927,Generation:2,CreationTimestamp:2019-05-06 08:26:28 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment a3eefbab-6fd8-11e9-8e1b-fa163ee16beb 0xc00153a627 0xc00153a628}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:2,ReadyReplicas:1,AvailableReplicas:1,Conditions:[],},}
+May  6 08:26:40.650: INFO: All old ReplicaSets of Deployment "test-rollover-deployment":
+May  6 08:26:40.650: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-controller,GenerateName:,Namespace:e2e-tests-deployment-9hnxj,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9hnxj/replicasets/test-rollover-controller,UID:9fbef034-6fd8-11e9-8e1b-fa163ee16beb,ResourceVersion:16935,Generation:2,CreationTimestamp:2019-05-06 08:26:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment a3eefbab-6fd8-11e9-8e1b-fa163ee16beb 0xc001e1fd47 0xc001e1fd48}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May  6 08:26:40.650: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6586df867b,GenerateName:,Namespace:e2e-tests-deployment-9hnxj,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-9hnxj/replicasets/test-rollover-deployment-6586df867b,UID:a3f0bb3d-6fd8-11e9-8e1b-fa163ee16beb,ResourceVersion:16889,Generation:2,CreationTimestamp:2019-05-06 08:26:26 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 2,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-rollover-deployment a3eefbab-6fd8-11e9-8e1b-fa163ee16beb 0xc001e1fe87 0xc001e1fe88}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6586df867b,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis-slave gcr.io/google_samples/gb-redisslave:nonexistent [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:10,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May  6 08:26:40.653: INFO: Pod "test-rollover-deployment-6b7f9d6597-4l22j" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-rollover-deployment-6b7f9d6597-4l22j,GenerateName:test-rollover-deployment-6b7f9d6597-,Namespace:e2e-tests-deployment-9hnxj,SelfLink:/api/v1/namespaces/e2e-tests-deployment-9hnxj/pods/test-rollover-deployment-6b7f9d6597-4l22j,UID:a530403f-6fd8-11e9-8e1b-fa163ee16beb,ResourceVersion:16904,Generation:0,CreationTimestamp:2019-05-06 08:26:28 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: rollover-pod,pod-template-hash: 6b7f9d6597,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-rollover-deployment-6b7f9d6597 a529bfd2-6fd8-11e9-8e1b-fa163ee16beb 0xc00153bf07 0xc00153bf08}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-flnp4 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-flnp4,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [{default-token-flnp4 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:26:28 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:26:30 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:26:30 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:26:28 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.99,StartTime:2019-05-06 08:26:28 +0000 UTC,ContainerStatuses:[{redis {nil ContainerStateRunning{StartedAt:2019-05-06 08:26:30 +0000 UTC,} nil} {nil nil nil} true 0 gcr.io/kubernetes-e2e-test-images/redis:1.0 docker-pullable://gcr.io/kubernetes-e2e-test-images/redis@sha256:af4748d1655c08dc54d4be5182135395db9ce87aba2d4699b26b14ae197c5830 docker://674297dc5fe0330b46f6f9391d43c587794f85145c562f41ad805130e9030614}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:26:40.653: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-9hnxj" for this suite.
+May  6 08:26:46.672: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:26:46.706: INFO: namespace: e2e-tests-deployment-9hnxj, resource: bindings, ignored listing per whitelist
+May  6 08:26:46.817: INFO: namespace e2e-tests-deployment-9hnxj deletion completed in 6.160439411s
+
+• [SLOW TEST:27.417 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should support rollover [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:26:46.818: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for 30 seconds to see if the garbage collector mistakenly deletes the rs
+STEP: Gathering metrics
+W0506 08:27:16.990868      14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May  6 08:27:16.990: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:27:16.991: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-6hw2f" for this suite.
+May  6 08:27:23.013: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:27:23.049: INFO: namespace: e2e-tests-gc-6hw2f, resource: bindings, ignored listing per whitelist
+May  6 08:27:23.148: INFO: namespace e2e-tests-gc-6hw2f deletion completed in 6.15330764s
+
+• [SLOW TEST:36.331 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:27:23.151: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:27:23.290: INFO: Waiting up to 5m0s for pod "downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-492m4" to be "success or failure"
+May  6 08:27:23.302: INFO: Pod "downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 12.019151ms
+May  6 08:27:25.315: INFO: Pod "downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02524369s
+STEP: Saw pod success
+May  6 08:27:25.316: INFO: Pod "downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:27:25.320: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:27:25.358: INFO: Waiting for pod downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035 to disappear
+May  6 08:27:25.364: INFO: Pod downwardapi-volume-c5bfe72a-6fd8-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:27:25.364: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-492m4" for this suite.
+May  6 08:27:31.383: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:27:31.425: INFO: namespace: e2e-tests-downward-api-492m4, resource: bindings, ignored listing per whitelist
+May  6 08:27:31.518: INFO: namespace e2e-tests-downward-api-492m4 deletion completed in 6.149885739s
+
+• [SLOW TEST:8.367 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:27:31.520: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the rc1
+STEP: create the rc2
+STEP: set half of pods created by rc simpletest-rc-to-be-deleted to have rc simpletest-rc-to-stay as owner as well
+STEP: delete the rc simpletest-rc-to-be-deleted
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0506 08:27:41.752173      14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May  6 08:27:41.752: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:27:41.752: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-pwhq5" for this suite.
+May  6 08:27:47.767: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:27:47.950: INFO: namespace: e2e-tests-gc-pwhq5, resource: bindings, ignored listing per whitelist
+May  6 08:27:48.067: INFO: namespace e2e-tests-gc-pwhq5 deletion completed in 6.311477134s
+
+• [SLOW TEST:16.547 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with configmap pod with mountPath of existing file [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:27:48.067: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with configmap pod with mountPath of existing file [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-configmap-gkjf
+STEP: Creating a pod to test atomic-volume-subpath
+May  6 08:27:48.257: INFO: Waiting up to 5m0s for pod "pod-subpath-test-configmap-gkjf" in namespace "e2e-tests-subpath-gl59p" to be "success or failure"
+May  6 08:27:48.297: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Pending", Reason="", readiness=false. Elapsed: 39.647969ms
+May  6 08:27:50.301: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Pending", Reason="", readiness=false. Elapsed: 2.043966761s
+May  6 08:27:52.307: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 4.04972178s
+May  6 08:27:54.326: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 6.069222295s
+May  6 08:27:56.332: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 8.075149821s
+May  6 08:27:58.337: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 10.080276439s
+May  6 08:28:00.342: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 12.084595356s
+May  6 08:28:02.349: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 14.092058699s
+May  6 08:28:04.354: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 16.096797602s
+May  6 08:28:06.357: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 18.100342828s
+May  6 08:28:08.365: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 20.107609809s
+May  6 08:28:10.369: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Running", Reason="", readiness=false. Elapsed: 22.111600844s
+May  6 08:28:12.374: INFO: Pod "pod-subpath-test-configmap-gkjf": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.116728394s
+STEP: Saw pod success
+May  6 08:28:12.374: INFO: Pod "pod-subpath-test-configmap-gkjf" satisfied condition "success or failure"
+May  6 08:28:12.385: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-subpath-test-configmap-gkjf container test-container-subpath-configmap-gkjf: 
+STEP: delete the pod
+May  6 08:28:12.417: INFO: Waiting for pod pod-subpath-test-configmap-gkjf to disappear
+May  6 08:28:12.420: INFO: Pod pod-subpath-test-configmap-gkjf no longer exists
+STEP: Deleting pod pod-subpath-test-configmap-gkjf
+May  6 08:28:12.420: INFO: Deleting pod "pod-subpath-test-configmap-gkjf" in namespace "e2e-tests-subpath-gl59p"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:28:12.423: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-gl59p" for this suite.
+May  6 08:28:18.440: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:28:18.525: INFO: namespace: e2e-tests-subpath-gl59p, resource: bindings, ignored listing per whitelist
+May  6 08:28:18.574: INFO: namespace e2e-tests-subpath-gl59p deletion completed in 6.146874899s
+
+• [SLOW TEST:30.507 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with configmap pod with mountPath of existing file [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:28:18.575: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May  6 08:28:18.761: INFO: Waiting up to 5m0s for pod "downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-pl2dz" to be "success or failure"
+May  6 08:28:18.772: INFO: Pod "downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 10.22127ms
+May  6 08:28:20.776: INFO: Pod "downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014352183s
+STEP: Saw pod success
+May  6 08:28:20.776: INFO: Pod "downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:28:20.779: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035 container dapi-container: 
+STEP: delete the pod
+May  6 08:28:20.823: INFO: Waiting for pod downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035 to disappear
+May  6 08:28:20.827: INFO: Pod downward-api-e6cf270b-6fd8-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:28:20.827: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-pl2dz" for this suite.
+May  6 08:28:26.846: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:28:26.967: INFO: namespace: e2e-tests-downward-api-pl2dz, resource: bindings, ignored listing per whitelist
+May  6 08:28:26.980: INFO: namespace e2e-tests-downward-api-pl2dz deletion completed in 6.146465414s
+
+• [SLOW TEST:8.406 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide pod UID as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:28:26.981: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating the pod
+May  6 08:28:29.679: INFO: Successfully updated pod "labelsupdateebcf03b0-6fd8-11e9-a235-ba138c0d9035"
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:28:31.700: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-7dzv5" for this suite.
+May  6 08:28:53.728: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:28:53.758: INFO: namespace: e2e-tests-projected-7dzv5, resource: bindings, ignored listing per whitelist
+May  6 08:28:53.882: INFO: namespace e2e-tests-projected-7dzv5 deletion completed in 22.17754104s
+
+• [SLOW TEST:26.902 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should update labels on modification [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:28:53.885: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0644 on node default medium
+May  6 08:28:54.040: INFO: Waiting up to 5m0s for pod "pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-5mngj" to be "success or failure"
+May  6 08:28:54.057: INFO: Pod "pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 16.901981ms
+May  6 08:28:56.062: INFO: Pod "pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.022419015s
+May  6 08:28:58.069: INFO: Pod "pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.029334963s
+STEP: Saw pod success
+May  6 08:28:58.070: INFO: Pod "pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:28:58.074: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:28:58.098: INFO: Waiting for pod pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035 to disappear
+May  6 08:28:58.108: INFO: Pod pod-fbd6cd77-6fd8-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:28:58.108: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-5mngj" for this suite.
+May  6 08:29:04.128: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:29:04.290: INFO: namespace: e2e-tests-emptydir-5mngj, resource: bindings, ignored listing per whitelist
+May  6 08:29:04.295: INFO: namespace e2e-tests-emptydir-5mngj deletion completed in 6.18285795s
+
+• [SLOW TEST:10.410 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0644,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-node] Downward API 
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:29:04.296: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May  6 08:29:04.461: INFO: Waiting up to 5m0s for pod "downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-ksgc9" to be "success or failure"
+May  6 08:29:04.473: INFO: Pod "downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 11.565621ms
+May  6 08:29:06.480: INFO: Pod "downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018578953s
+May  6 08:29:08.490: INFO: Pod "downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.028283013s
+STEP: Saw pod success
+May  6 08:29:08.490: INFO: Pod "downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:29:08.494: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035 container dapi-container: 
+STEP: delete the pod
+May  6 08:29:08.530: INFO: Waiting for pod downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035 to disappear
+May  6 08:29:08.534: INFO: Pod downward-api-020d4ae0-6fd9-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:29:08.534: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-ksgc9" for this suite.
+May  6 08:29:14.553: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:29:14.649: INFO: namespace: e2e-tests-downward-api-ksgc9, resource: bindings, ignored listing per whitelist
+May  6 08:29:14.689: INFO: namespace e2e-tests-downward-api-ksgc9 deletion completed in 6.149698336s
+
+• [SLOW TEST:10.394 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:29:14.693: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test use defaults
+May  6 08:29:14.849: INFO: Waiting up to 5m0s for pod "client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035" in namespace "e2e-tests-containers-rw2h5" to be "success or failure"
+May  6 08:29:14.855: INFO: Pod "client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.071372ms
+May  6 08:29:16.902: INFO: Pod "client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.053793439s
+STEP: Saw pod success
+May  6 08:29:16.903: INFO: Pod "client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:29:16.907: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:29:17.029: INFO: Waiting for pod client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035 to disappear
+May  6 08:29:17.036: INFO: Pod client-containers-083e8448-6fd9-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:29:17.036: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-rw2h5" for this suite.
+May  6 08:29:23.083: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:29:23.147: INFO: namespace: e2e-tests-containers-rw2h5, resource: bindings, ignored listing per whitelist
+May  6 08:29:23.264: INFO: namespace e2e-tests-containers-rw2h5 deletion completed in 6.217749651s
+
+• [SLOW TEST:8.571 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should use the image defaults if command and args are blank [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run pod 
+  should create a pod from an image when restart is Never  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:29:23.265: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run pod
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1527
+[It] should create a pod from an image when restart is Never  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May  6 08:29:23.414: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-pod --restart=Never --generator=run-pod/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-zhf5s'
+May  6 08:29:24.367: INFO: stderr: ""
+May  6 08:29:24.367: INFO: stdout: "pod/e2e-test-nginx-pod created\n"
+STEP: verifying the pod e2e-test-nginx-pod was created
+[AfterEach] [k8s.io] Kubectl run pod
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1532
+May  6 08:29:24.372: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete pods e2e-test-nginx-pod --namespace=e2e-tests-kubectl-zhf5s'
+May  6 08:29:30.158: INFO: stderr: ""
+May  6 08:29:30.158: INFO: stdout: "pod \"e2e-test-nginx-pod\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:29:30.158: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-zhf5s" for this suite.
+May  6 08:29:36.186: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:29:36.203: INFO: namespace: e2e-tests-kubectl-zhf5s, resource: bindings, ignored listing per whitelist
+May  6 08:29:36.376: INFO: namespace e2e-tests-kubectl-zhf5s deletion completed in 6.209807763s
+
+• [SLOW TEST:13.112 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run pod
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create a pod from an image when restart is Never  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl label 
+  should update the label on a resource  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:29:36.379: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1052
+STEP: creating the pod
+May  6 08:29:36.587: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:36.866: INFO: stderr: ""
+May  6 08:29:36.866: INFO: stdout: "pod/pause created\n"
+May  6 08:29:36.866: INFO: Waiting up to 5m0s for 1 pods to be running and ready: [pause]
+May  6 08:29:36.866: INFO: Waiting up to 5m0s for pod "pause" in namespace "e2e-tests-kubectl-p727x" to be "running and ready"
+May  6 08:29:36.873: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 7.585738ms
+May  6 08:29:38.877: INFO: Pod "pause": Phase="Pending", Reason="", readiness=false. Elapsed: 2.011379239s
+May  6 08:29:40.881: INFO: Pod "pause": Phase="Running", Reason="", readiness=true. Elapsed: 4.015480166s
+May  6 08:29:40.881: INFO: Pod "pause" satisfied condition "running and ready"
+May  6 08:29:40.881: INFO: Wanted all 1 pods to be running and ready. Result: true. Pods: [pause]
+[It] should update the label on a resource  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: adding the label testing-label with value testing-label-value to a pod
+May  6 08:29:40.882: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 label pods pause testing-label=testing-label-value --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.016: INFO: stderr: ""
+May  6 08:29:41.016: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod has the label testing-label with the value testing-label-value
+May  6 08:29:41.016: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pod pause -L testing-label --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.142: INFO: stderr: ""
+May  6 08:29:41.142: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          5s    testing-label-value\n"
+STEP: removing the label testing-label of a pod
+May  6 08:29:41.142: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 label pods pause testing-label- --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.270: INFO: stderr: ""
+May  6 08:29:41.270: INFO: stdout: "pod/pause labeled\n"
+STEP: verifying the pod doesn't have the label testing-label
+May  6 08:29:41.270: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pod pause -L testing-label --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.388: INFO: stderr: ""
+May  6 08:29:41.388: INFO: stdout: "NAME    READY   STATUS    RESTARTS   AGE   TESTING-LABEL\npause   1/1     Running   0          5s    \n"
+[AfterEach] [k8s.io] Kubectl label
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1059
+STEP: using delete to clean up resources
+May  6 08:29:41.388: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.523: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May  6 08:29:41.523: INFO: stdout: "pod \"pause\" force deleted\n"
+May  6 08:29:41.523: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get rc,svc -l name=pause --no-headers --namespace=e2e-tests-kubectl-p727x'
+May  6 08:29:41.701: INFO: stderr: "No resources found.\n"
+May  6 08:29:41.701: INFO: stdout: ""
+May  6 08:29:41.701: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -l name=pause --namespace=e2e-tests-kubectl-p727x -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+May  6 08:29:41.839: INFO: stderr: ""
+May  6 08:29:41.839: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:29:41.839: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-p727x" for this suite.
+May  6 08:29:47.874: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:29:48.013: INFO: namespace: e2e-tests-kubectl-p727x, resource: bindings, ignored listing per whitelist
+May  6 08:29:48.025: INFO: namespace e2e-tests-kubectl-p727x deletion completed in 6.176072732s
+
+• [SLOW TEST:11.647 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl label
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should update the label on a resource  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected configMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:29:48.026: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with configMap that has name projected-configmap-test-upd-1c1847c0-6fd9-11e9-a235-ba138c0d9035
+STEP: Creating the pod
+STEP: Updating configmap projected-configmap-test-upd-1c1847c0-6fd9-11e9-a235-ba138c0d9035
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:31:16.890: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-kqdlz" for this suite.
+May  6 08:31:38.915: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:31:38.993: INFO: namespace: e2e-tests-projected-kqdlz, resource: bindings, ignored listing per whitelist
+May  6 08:31:39.090: INFO: namespace e2e-tests-projected-kqdlz deletion completed in 22.194115408s
+
+• [SLOW TEST:111.064 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:31:39.091: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating 50 configmaps
+STEP: Creating RC which spawns configmap-volume pods
+May  6 08:31:39.656: INFO: Pod name wrapped-volume-race-5e8d41cb-6fd9-11e9-a235-ba138c0d9035: Found 0 pods out of 5
+May  6 08:31:44.684: INFO: Pod name wrapped-volume-race-5e8d41cb-6fd9-11e9-a235-ba138c0d9035: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-5e8d41cb-6fd9-11e9-a235-ba138c0d9035 in namespace e2e-tests-emptydir-wrapper-65b6b, will wait for the garbage collector to delete the pods
+May  6 08:31:54.804: INFO: Deleting ReplicationController wrapped-volume-race-5e8d41cb-6fd9-11e9-a235-ba138c0d9035 took: 8.641307ms
+May  6 08:31:54.905: INFO: Terminating ReplicationController wrapped-volume-race-5e8d41cb-6fd9-11e9-a235-ba138c0d9035 pods took: 100.35994ms
+STEP: Creating RC which spawns configmap-volume pods
+May  6 08:32:40.624: INFO: Pod name wrapped-volume-race-82e3a99a-6fd9-11e9-a235-ba138c0d9035: Found 0 pods out of 5
+May  6 08:32:45.636: INFO: Pod name wrapped-volume-race-82e3a99a-6fd9-11e9-a235-ba138c0d9035: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-82e3a99a-6fd9-11e9-a235-ba138c0d9035 in namespace e2e-tests-emptydir-wrapper-65b6b, will wait for the garbage collector to delete the pods
+May  6 08:32:57.761: INFO: Deleting ReplicationController wrapped-volume-race-82e3a99a-6fd9-11e9-a235-ba138c0d9035 took: 21.102271ms
+May  6 08:32:57.861: INFO: Terminating ReplicationController wrapped-volume-race-82e3a99a-6fd9-11e9-a235-ba138c0d9035 pods took: 100.253078ms
+STEP: Creating RC which spawns configmap-volume pods
+May  6 08:33:40.399: INFO: Pod name wrapped-volume-race-a681c864-6fd9-11e9-a235-ba138c0d9035: Found 1 pods out of 5
+May  6 08:33:45.415: INFO: Pod name wrapped-volume-race-a681c864-6fd9-11e9-a235-ba138c0d9035: Found 5 pods out of 5
+STEP: Ensuring each pod is running
+STEP: deleting ReplicationController wrapped-volume-race-a681c864-6fd9-11e9-a235-ba138c0d9035 in namespace e2e-tests-emptydir-wrapper-65b6b, will wait for the garbage collector to delete the pods
+May  6 08:33:55.527: INFO: Deleting ReplicationController wrapped-volume-race-a681c864-6fd9-11e9-a235-ba138c0d9035 took: 13.440039ms
+May  6 08:33:55.627: INFO: Terminating ReplicationController wrapped-volume-race-a681c864-6fd9-11e9-a235-ba138c0d9035 pods took: 100.286616ms
+STEP: Cleaning up the configMaps
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:34:41.856: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-wrapper-65b6b" for this suite.
+May  6 08:34:49.879: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:34:49.913: INFO: namespace: e2e-tests-emptydir-wrapper-65b6b, resource: bindings, ignored listing per whitelist
+May  6 08:34:50.035: INFO: namespace e2e-tests-emptydir-wrapper-65b6b deletion completed in 8.173949768s
+
+• [SLOW TEST:190.944 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not cause race condition when used for configmaps [Serial] [Slow] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl logs 
+  should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:34:50.035: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1134
+STEP: creating an rc
+May  6 08:34:50.147: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-65cb7'
+May  6 08:34:50.426: INFO: stderr: ""
+May  6 08:34:50.426: INFO: stdout: "replicationcontroller/redis-master created\n"
+[It] should be able to retrieve and filter logs  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Waiting for Redis master to start.
+May  6 08:34:51.435: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:34:51.435: INFO: Found 0 / 1
+May  6 08:34:52.431: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:34:52.431: INFO: Found 0 / 1
+May  6 08:34:53.430: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:34:53.430: INFO: Found 1 / 1
+May  6 08:34:53.430: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+May  6 08:34:53.440: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:34:53.440: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+STEP: checking for a matching strings
+May  6 08:34:53.440: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 logs redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7'
+May  6 08:34:53.583: INFO: stderr: ""
+May  6 08:34:53.583: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 06 May 08:34:51.724 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 May 08:34:51.724 # Server started, Redis version 3.2.12\n1:M 06 May 08:34:51.724 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 May 08:34:51.724 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log lines
+May  6 08:34:53.583: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 log redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7 --tail=1'
+May  6 08:34:53.714: INFO: stderr: ""
+May  6 08:34:53.714: INFO: stdout: "1:M 06 May 08:34:51.724 * The server is now ready to accept connections on port 6379\n"
+STEP: limiting log bytes
+May  6 08:34:53.714: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 log redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7 --limit-bytes=1'
+May  6 08:34:53.848: INFO: stderr: ""
+May  6 08:34:53.848: INFO: stdout: " "
+STEP: exposing timestamps
+May  6 08:34:53.848: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 log redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7 --tail=1 --timestamps'
+May  6 08:34:53.978: INFO: stderr: ""
+May  6 08:34:53.978: INFO: stdout: "2019-05-06T08:34:51.725352658Z 1:M 06 May 08:34:51.724 * The server is now ready to accept connections on port 6379\n"
+STEP: restricting to a time range
+May  6 08:34:56.479: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 log redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7 --since=1s'
+May  6 08:34:56.630: INFO: stderr: ""
+May  6 08:34:56.630: INFO: stdout: ""
+May  6 08:34:56.630: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 log redis-master-p2cdx redis-master --namespace=e2e-tests-kubectl-65cb7 --since=24h'
+May  6 08:34:56.749: INFO: stderr: ""
+May  6 08:34:56.749: INFO: stdout: "                _._                                                  \n           _.-``__ ''-._                                             \n      _.-``    `.  `_.  ''-._           Redis 3.2.12 (35a5711f/0) 64 bit\n  .-`` .-```.  ```\\/    _.,_ ''-._                                   \n (    '      ,       .-`  | `,    )     Running in standalone mode\n |`-._`-...-` __...-.``-._|'` _.-'|     Port: 6379\n |    `-._   `._    /     _.-'    |     PID: 1\n  `-._    `-._  `-./  _.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |           http://redis.io        \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n |`-._`-._    `-.__.-'    _.-'_.-'|                                  \n |    `-._`-._        _.-'_.-'    |                                  \n  `-._    `-._`-.__.-'_.-'    _.-'                                   \n      `-._    `-.__.-'    _.-'                                       \n          `-._        _.-'                                           \n              `-.__.-'                                               \n\n1:M 06 May 08:34:51.724 # WARNING: The TCP backlog setting of 511 cannot be enforced because /proc/sys/net/core/somaxconn is set to the lower value of 128.\n1:M 06 May 08:34:51.724 # Server started, Redis version 3.2.12\n1:M 06 May 08:34:51.724 # WARNING you have Transparent Huge Pages (THP) support enabled in your kernel. This will create latency and memory usage issues with Redis. To fix this issue run the command 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' as root, and add it to your /etc/rc.local in order to retain the setting after a reboot. Redis must be restarted after THP is disabled.\n1:M 06 May 08:34:51.724 * The server is now ready to accept connections on port 6379\n"
+[AfterEach] [k8s.io] Kubectl logs
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1140
+STEP: using delete to clean up resources
+May  6 08:34:56.749: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete --grace-period=0 --force -f - --namespace=e2e-tests-kubectl-65cb7'
+May  6 08:34:56.867: INFO: stderr: "warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.\n"
+May  6 08:34:56.867: INFO: stdout: "replicationcontroller \"redis-master\" force deleted\n"
+May  6 08:34:56.867: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get rc,svc -l name=nginx --no-headers --namespace=e2e-tests-kubectl-65cb7'
+May  6 08:34:57.008: INFO: stderr: "No resources found.\n"
+May  6 08:34:57.008: INFO: stdout: ""
+May  6 08:34:57.008: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -l name=nginx --namespace=e2e-tests-kubectl-65cb7 -o go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ "\n" }}{{ end }}{{ end }}'
+May  6 08:34:57.182: INFO: stderr: ""
+May  6 08:34:57.182: INFO: stdout: ""
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:34:57.182: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-65cb7" for this suite.
+May  6 08:35:03.208: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:35:03.236: INFO: namespace: e2e-tests-kubectl-65cb7, resource: bindings, ignored listing per whitelist
+May  6 08:35:03.360: INFO: namespace e2e-tests-kubectl-65cb7 deletion completed in 6.170645834s
+
+• [SLOW TEST:13.325 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl logs
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should be able to retrieve and filter logs  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-api-machinery] Secrets 
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:35:03.360: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-d8092242-6fd9-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:35:03.474: INFO: Waiting up to 5m0s for pod "pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-b5skl" to be "success or failure"
+May  6 08:35:03.485: INFO: Pod "pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 10.646661ms
+May  6 08:35:05.489: INFO: Pod "pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01540744s
+STEP: Saw pod success
+May  6 08:35:05.490: INFO: Pod "pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:35:05.495: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035 container secret-env-test: 
+STEP: delete the pod
+May  6 08:35:05.519: INFO: Waiting for pod pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035 to disappear
+May  6 08:35:05.524: INFO: Pod pod-secrets-d809e343-6fd9-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-api-machinery] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:35:05.524: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-b5skl" for this suite.
+May  6 08:35:11.545: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:35:11.563: INFO: namespace: e2e-tests-secrets-b5skl, resource: bindings, ignored listing per whitelist
+May  6 08:35:11.676: INFO: namespace e2e-tests-secrets-b5skl deletion completed in 6.146606878s
+
+• [SLOW TEST:8.315 seconds]
+[sig-api-machinery] Secrets
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets.go:32
+  should be consumable from pods in env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run deployment 
+  should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:35:11.676: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1399
+[It] should create a deployment from an image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May  6 08:35:11.795: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --generator=deployment/v1beta1 --namespace=e2e-tests-kubectl-7dc4p'
+May  6 08:35:11.915: INFO: stderr: "kubectl run --generator=deployment/v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May  6 08:35:11.915: INFO: stdout: "deployment.extensions/e2e-test-nginx-deployment created\n"
+STEP: verifying the deployment e2e-test-nginx-deployment was created
+STEP: verifying the pod controlled by deployment e2e-test-nginx-deployment was created
+[AfterEach] [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1404
+May  6 08:35:13.945: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-7dc4p'
+May  6 08:35:14.063: INFO: stderr: ""
+May  6 08:35:14.063: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:35:14.063: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-7dc4p" for this suite.
+May  6 08:35:36.087: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:35:36.213: INFO: namespace: e2e-tests-kubectl-7dc4p, resource: bindings, ignored listing per whitelist
+May  6 08:35:36.270: INFO: namespace e2e-tests-kubectl-7dc4p deletion completed in 22.199613146s
+
+• [SLOW TEST:24.594 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create a deployment from an image  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] Services 
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:35:36.271: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating service multi-endpoint-test in namespace e2e-tests-services-22tcv
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-22tcv to expose endpoints map[]
+May  6 08:35:36.433: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-22tcv exposes endpoints map[] (4.979217ms elapsed)
+STEP: Creating pod pod1 in namespace e2e-tests-services-22tcv
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-22tcv to expose endpoints map[pod1:[100]]
+May  6 08:35:39.502: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-22tcv exposes endpoints map[pod1:[100]] (3.053783594s elapsed)
+STEP: Creating pod pod2 in namespace e2e-tests-services-22tcv
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-22tcv to expose endpoints map[pod1:[100] pod2:[101]]
+May  6 08:35:42.566: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-22tcv exposes endpoints map[pod2:[101] pod1:[100]] (3.057272954s elapsed)
+STEP: Deleting pod pod1 in namespace e2e-tests-services-22tcv
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-22tcv to expose endpoints map[pod2:[101]]
+May  6 08:35:43.624: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-22tcv exposes endpoints map[pod2:[101]] (1.031761154s elapsed)
+STEP: Deleting pod pod2 in namespace e2e-tests-services-22tcv
+STEP: waiting up to 3m0s for service multi-endpoint-test in namespace e2e-tests-services-22tcv to expose endpoints map[]
+May  6 08:35:43.639: INFO: successfully validated that service multi-endpoint-test in namespace e2e-tests-services-22tcv exposes endpoints map[] (7.110988ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:35:43.662: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-22tcv" for this suite.
+May  6 08:36:05.691: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:36:05.757: INFO: namespace: e2e-tests-services-22tcv, resource: bindings, ignored listing per whitelist
+May  6 08:36:05.893: INFO: namespace e2e-tests-services-22tcv deletion completed in 22.224719501s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:29.622 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve multiport endpoints from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:36:05.894: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-fd5d00c9-6fd9-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:36:06.100: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-7k4h5" to be "success or failure"
+May  6 08:36:06.107: INFO: Pod "pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.296483ms
+May  6 08:36:08.116: INFO: Pod "pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015607407s
+STEP: Saw pod success
+May  6 08:36:08.116: INFO: Pod "pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:36:08.119: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: 
+STEP: delete the pod
+May  6 08:36:08.149: INFO: Waiting for pod pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035 to disappear
+May  6 08:36:08.156: INFO: Pod pod-projected-configmaps-fd5de76f-6fd9-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:36:08.156: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-7k4h5" for this suite.
+May  6 08:36:14.179: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:36:14.392: INFO: namespace: e2e-tests-projected-7k4h5, resource: bindings, ignored listing per whitelist
+May  6 08:36:14.393: INFO: namespace e2e-tests-projected-7k4h5 deletion completed in 6.233339086s
+
+• [SLOW TEST:8.499 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (root,0644,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:36:14.398: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (root,0644,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0644 on tmpfs
+May  6 08:36:14.564: INFO: Waiting up to 5m0s for pod "pod-02691ab9-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-rgzvv" to be "success or failure"
+May  6 08:36:14.573: INFO: Pod "pod-02691ab9-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.53381ms
+May  6 08:36:16.587: INFO: Pod "pod-02691ab9-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023225019s
+STEP: Saw pod success
+May  6 08:36:16.588: INFO: Pod "pod-02691ab9-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:36:16.592: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-02691ab9-6fda-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:36:16.619: INFO: Waiting for pod pod-02691ab9-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:36:16.623: INFO: Pod pod-02691ab9-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:36:16.623: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-rgzvv" for this suite.
+May  6 08:36:22.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:36:22.782: INFO: namespace: e2e-tests-emptydir-rgzvv, resource: bindings, ignored listing per whitelist
+May  6 08:36:22.782: INFO: namespace e2e-tests-emptydir-rgzvv deletion completed in 6.15374464s
+
+• [SLOW TEST:8.384 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (root,0644,tmpfs) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:36:22.784: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the rc
+STEP: delete the rc
+STEP: wait for the rc to be deleted
+STEP: Gathering metrics
+W0506 08:36:29.035087      14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May  6 08:36:29.035: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:36:29.035: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-5ccnx" for this suite.
+May  6 08:36:35.072: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:36:35.146: INFO: namespace: e2e-tests-gc-5ccnx, resource: bindings, ignored listing per whitelist
+May  6 08:36:35.224: INFO: namespace e2e-tests-gc-5ccnx deletion completed in 6.174598873s
+
+• [SLOW TEST:12.441 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl version 
+  should check is all data is printed  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:36:35.226: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should check is all data is printed  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:36:35.418: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 version'
+May  6 08:36:35.568: INFO: stderr: ""
+May  6 08:36:35.568: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.3\", GitCommit:\"721bfa751924da8d1680787490c54b9179b1fed0\", GitTreeState:\"clean\", BuildDate:\"2019-02-01T20:08:12Z\", GoVersion:\"go1.11.5\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"13\", GitVersion:\"v1.13.3\", GitCommit:\"2bba0127d85d5a46ab4b778548be28623b32d0b0\", GitTreeState:\"clean\", BuildDate:\"2019-04-04T11:24:37Z\", GoVersion:\"go1.11.4\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:36:35.568: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-frrc7" for this suite.
+May  6 08:36:41.591: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:36:41.691: INFO: namespace: e2e-tests-kubectl-frrc7, resource: bindings, ignored listing per whitelist
+May  6 08:36:41.725: INFO: namespace e2e-tests-kubectl-frrc7 deletion completed in 6.151999075s
+
+• [SLOW TEST:6.500 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl version
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should check is all data is printed  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] Secrets 
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:36:41.727: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name s-test-opt-del-12ae8dd4-6fda-11e9-a235-ba138c0d9035
+STEP: Creating secret with name s-test-opt-upd-12ae8e2d-6fda-11e9-a235-ba138c0d9035
+STEP: Creating the pod
+STEP: Deleting secret s-test-opt-del-12ae8dd4-6fda-11e9-a235-ba138c0d9035
+STEP: Updating secret s-test-opt-upd-12ae8e2d-6fda-11e9-a235-ba138c0d9035
+STEP: Creating secret with name s-test-opt-create-12ae8e4a-6fda-11e9-a235-ba138c0d9035
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:38:04.565: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-dgjpm" for this suite.
+May  6 08:38:26.583: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:38:26.617: INFO: namespace: e2e-tests-secrets-dgjpm, resource: bindings, ignored listing per whitelist
+May  6 08:38:26.740: INFO: namespace e2e-tests-secrets-dgjpm deletion completed in 22.17035062s
+
+• [SLOW TEST:105.013 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  optional updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:38:26.740: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:38:26.988: INFO: Waiting up to 5m0s for pod "downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-vf458" to be "success or failure"
+May  6 08:38:27.010: INFO: Pod "downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 22.036318ms
+May  6 08:38:29.017: INFO: Pod "downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.028995646s
+STEP: Saw pod success
+May  6 08:38:29.017: INFO: Pod "downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:38:29.022: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:38:29.049: INFO: Waiting for pod downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:38:29.054: INFO: Pod downwardapi-volume-5157fb49-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:38:29.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-vf458" for this suite.
+May  6 08:38:35.088: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:38:35.318: INFO: namespace: e2e-tests-projected-vf458, resource: bindings, ignored listing per whitelist
+May  6 08:38:35.338: INFO: namespace e2e-tests-projected-vf458 deletion completed in 6.279430793s
+
+• [SLOW TEST:8.598 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run --rm job 
+  should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:38:35.339: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should create a job from an image, then delete the job  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: executing a command with run --rm and attach with stdin
+May  6 08:38:35.490: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 --namespace=e2e-tests-kubectl-zv822 run e2e-test-rm-busybox-job --image=docker.io/library/busybox:1.29 --rm=true --generator=job/v1 --restart=OnFailure --attach=true --stdin -- sh -c cat && echo 'stdin closed''
+May  6 08:38:38.190: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\nIf you don't see a command prompt, try pressing enter.\n"
+May  6 08:38:38.190: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n"
+STEP: verifying the job e2e-test-rm-busybox-job was deleted
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:38:40.210: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-zv822" for this suite.
+May  6 08:38:54.233: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:38:54.272: INFO: namespace: e2e-tests-kubectl-zv822, resource: bindings, ignored listing per whitelist
+May  6 08:38:54.327: INFO: namespace e2e-tests-kubectl-zv822 deletion completed in 14.111750688s
+
+• [SLOW TEST:18.988 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run --rm job
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create a job from an image, then delete the job  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
+  should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:38:54.328: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename statefulset
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:59
+[BeforeEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:74
+STEP: Creating service test in namespace e2e-tests-statefulset-pnzlb
+[It] should perform rolling updates and roll backs of template modifications [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a new StatefulSet
+May  6 08:38:54.454: INFO: Found 0 stateful pods, waiting for 3
+May  6 08:39:04.460: INFO: Waiting for pod ss2-0 to enter Running - Ready=true, currently Running - Ready=true
+May  6 08:39:04.460: INFO: Waiting for pod ss2-1 to enter Running - Ready=true, currently Running - Ready=true
+May  6 08:39:04.460: INFO: Waiting for pod ss2-2 to enter Running - Ready=true, currently Running - Ready=true
+May  6 08:39:04.472: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-pnzlb ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May  6 08:39:04.766: INFO: stderr: ""
+May  6 08:39:04.766: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May  6 08:39:04.766: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+STEP: Updating StatefulSet template: update image from docker.io/library/nginx:1.14-alpine to docker.io/library/nginx:1.15-alpine
+May  6 08:39:14.810: INFO: Updating stateful set ss2
+STEP: Creating a new revision
+STEP: Updating Pods in reverse ordinal order
+May  6 08:39:24.842: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-pnzlb ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May  6 08:39:25.108: INFO: stderr: ""
+May  6 08:39:25.108: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May  6 08:39:25.108: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May  6 08:39:45.162: INFO: Waiting for StatefulSet e2e-tests-statefulset-pnzlb/ss2 to complete update
+STEP: Rolling back to a previous revision
+May  6 08:39:55.170: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-pnzlb ss2-1 -- /bin/sh -c mv -v /usr/share/nginx/html/index.html /tmp/ || true'
+May  6 08:39:55.470: INFO: stderr: ""
+May  6 08:39:55.470: INFO: stdout: "'/usr/share/nginx/html/index.html' -> '/tmp/index.html'\n"
+May  6 08:39:55.470: INFO: stdout of mv -v /usr/share/nginx/html/index.html /tmp/ || true on ss2-1: '/usr/share/nginx/html/index.html' -> '/tmp/index.html'
+
+May  6 08:39:55.521: INFO: Updating stateful set ss2
+STEP: Rolling back update in reverse ordinal order
+May  6 08:40:05.581: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 exec --namespace=e2e-tests-statefulset-pnzlb ss2-1 -- /bin/sh -c mv -v /tmp/index.html /usr/share/nginx/html/ || true'
+May  6 08:40:06.339: INFO: stderr: ""
+May  6 08:40:06.339: INFO: stdout: "'/tmp/index.html' -> '/usr/share/nginx/html/index.html'\n"
+May  6 08:40:06.339: INFO: stdout of mv -v /tmp/index.html /usr/share/nginx/html/ || true on ss2-1: '/tmp/index.html' -> '/usr/share/nginx/html/index.html'
+
+May  6 08:40:16.376: INFO: Waiting for StatefulSet e2e-tests-statefulset-pnzlb/ss2 to complete update
+May  6 08:40:16.376: INFO: Waiting for Pod e2e-tests-statefulset-pnzlb/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+May  6 08:40:16.376: INFO: Waiting for Pod e2e-tests-statefulset-pnzlb/ss2-1 to have revision ss2-787997d666 update revision ss2-c79899b9
+May  6 08:40:16.376: INFO: Waiting for Pod e2e-tests-statefulset-pnzlb/ss2-2 to have revision ss2-787997d666 update revision ss2-c79899b9
+May  6 08:40:26.385: INFO: Waiting for StatefulSet e2e-tests-statefulset-pnzlb/ss2 to complete update
+May  6 08:40:26.385: INFO: Waiting for Pod e2e-tests-statefulset-pnzlb/ss2-0 to have revision ss2-787997d666 update revision ss2-c79899b9
+May  6 08:40:26.385: INFO: Waiting for Pod e2e-tests-statefulset-pnzlb/ss2-1 to have revision ss2-787997d666 update revision ss2-c79899b9
+May  6 08:40:36.386: INFO: Waiting for StatefulSet e2e-tests-statefulset-pnzlb/ss2 to complete update
+[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/statefulset.go:85
+May  6 08:40:46.389: INFO: Deleting all statefulset in ns e2e-tests-statefulset-pnzlb
+May  6 08:40:46.393: INFO: Scaling statefulset ss2 to 0
+May  6 08:40:56.432: INFO: Waiting for statefulset status.replicas updated to 0
+May  6 08:40:56.435: INFO: Deleting statefulset ss2
+[AfterEach] [sig-apps] StatefulSet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:40:56.462: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-statefulset-pnzlb" for this suite.
+May  6 08:41:02.492: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:41:02.592: INFO: namespace: e2e-tests-statefulset-pnzlb, resource: bindings, ignored listing per whitelist
+May  6 08:41:02.649: INFO: namespace e2e-tests-statefulset-pnzlb deletion completed in 6.18249406s
+
+• [SLOW TEST:128.322 seconds]
+[sig-apps] StatefulSet
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should perform rolling updates and roll backs of template modifications [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] EmptyDir volumes 
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:41:02.650: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir volume type on tmpfs
+May  6 08:41:02.773: INFO: Waiting up to 5m0s for pod "pod-ae32cf62-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-zpkcp" to be "success or failure"
+May  6 08:41:02.780: INFO: Pod "pod-ae32cf62-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.203644ms
+May  6 08:41:04.792: INFO: Pod "pod-ae32cf62-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.018991222s
+May  6 08:41:06.801: INFO: Pod "pod-ae32cf62-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.027885029s
+STEP: Saw pod success
+May  6 08:41:06.803: INFO: Pod "pod-ae32cf62-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:41:06.809: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-ae32cf62-6fda-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:41:06.836: INFO: Waiting for pod pod-ae32cf62-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:41:06.843: INFO: Pod pod-ae32cf62-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:41:06.843: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-zpkcp" for this suite.
+May  6 08:41:12.861: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:41:12.881: INFO: namespace: e2e-tests-emptydir-zpkcp, resource: bindings, ignored listing per whitelist
+May  6 08:41:13.022: INFO: namespace e2e-tests-emptydir-zpkcp deletion completed in 6.175764986s
+
+• [SLOW TEST:10.373 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  volume on tmpfs should have the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:41:13.023: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:41:13.169: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-8xqlf" to be "success or failure"
+May  6 08:41:13.178: INFO: Pod "downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.481328ms
+May  6 08:41:15.185: INFO: Pod "downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.015919774s
+STEP: Saw pod success
+May  6 08:41:15.185: INFO: Pod "downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:41:15.188: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:41:15.227: INFO: Waiting for pod downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:41:15.233: INFO: Pod downwardapi-volume-b4640247-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:41:15.233: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-8xqlf" for this suite.
+May  6 08:41:21.266: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:41:21.405: INFO: namespace: e2e-tests-downward-api-8xqlf, resource: bindings, ignored listing per whitelist
+May  6 08:41:21.457: INFO: namespace e2e-tests-downward-api-8xqlf deletion completed in 6.218846598s
+
+• [SLOW TEST:8.434 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's memory limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:41:21.459: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename namespaces
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a pod in the namespace
+STEP: Waiting for the pod to have running status
+STEP: Creating an uninitialized pod in the namespace
+May  6 08:41:23.747: INFO: error from create uninitialized namespace: 
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there are no pods in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:41:47.873: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-namespaces-ftcpl" for this suite.
+May  6 08:41:53.894: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:41:53.933: INFO: namespace: e2e-tests-namespaces-ftcpl, resource: bindings, ignored listing per whitelist
+May  6 08:41:54.048: INFO: namespace e2e-tests-namespaces-ftcpl deletion completed in 6.167311875s
+STEP: Destroying namespace "e2e-tests-nsdeletetest-j7h7t" for this suite.
+May  6 08:41:54.052: INFO: Namespace e2e-tests-nsdeletetest-j7h7t was already deleted
+STEP: Destroying namespace "e2e-tests-nsdeletetest-nz8mh" for this suite.
+May  6 08:42:00.075: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:00.169: INFO: namespace: e2e-tests-nsdeletetest-nz8mh, resource: bindings, ignored listing per whitelist
+May  6 08:42:00.244: INFO: namespace e2e-tests-nsdeletetest-nz8mh deletion completed in 6.192280837s
+
+• [SLOW TEST:38.785 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all pods are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:00.245: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-d08a91fc-6fda-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:42:00.392: INFO: Waiting up to 5m0s for pod "pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-vb4sg" to be "success or failure"
+May  6 08:42:00.403: INFO: Pod "pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 10.865782ms
+May  6 08:42:02.410: INFO: Pod "pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017839259s
+STEP: Saw pod success
+May  6 08:42:02.410: INFO: Pod "pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:42:02.414: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035 container configmap-volume-test: 
+STEP: delete the pod
+May  6 08:42:02.497: INFO: Waiting for pod pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:42:02.502: INFO: Pod pod-configmaps-d08b6f44-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:42:02.503: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-vb4sg" for this suite.
+May  6 08:42:08.522: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:08.537: INFO: namespace: e2e-tests-configmap-vb4sg, resource: bindings, ignored listing per whitelist
+May  6 08:42:08.691: INFO: namespace e2e-tests-configmap-vb4sg deletion completed in 6.184562406s
+
+• [SLOW TEST:8.445 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
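+
+The ConfigMap spec above mounts one ConfigMap through two volumes of the same pod and reads it back from both paths. A hand-run sketch of that setup (all names are illustrative):
+
+```bash
+kubectl create configmap demo-cm --from-literal=data-1=value-1
+
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-two-volumes
+spec:
+  restartPolicy: Never
+  containers:
+  - name: configmap-volume-test
+    image: busybox
+    command: ["sh", "-c", "cat /etc/cm-a/data-1 /etc/cm-b/data-1"]
+    volumeMounts:
+    - { name: cm-a, mountPath: /etc/cm-a }
+    - { name: cm-b, mountPath: /etc/cm-b }
+  volumes:
+  - name: cm-a
+    configMap: { name: demo-cm }
+  - name: cm-b
+    configMap: { name: demo-cm }
+EOF
+
+# after the pod completes, its log shows the value read through both mounts
+kubectl logs configmap-two-volumes
+```
+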
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicaSet 
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:08.692: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename replicaset
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:42:08.908: INFO: Creating ReplicaSet my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035
+May  6 08:42:08.918: INFO: Pod name my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035: Found 0 pods out of 1
+May  6 08:42:13.924: INFO: Pod name my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035: Found 1 pods out of 1
+May  6 08:42:13.924: INFO: Ensuring a pod for ReplicaSet "my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035" is running
+May  6 08:42:13.930: INFO: Pod "my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035-78z7q" is running (conditions: [{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 08:42:08 +0000 UTC Reason: Message:} {Type:Ready Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 08:42:10 +0000 UTC Reason: Message:} {Type:ContainersReady Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 08:42:10 +0000 UTC Reason: Message:} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2019-05-06 08:42:08 +0000 UTC Reason: Message:}])
+May  6 08:42:13.931: INFO: Trying to dial the pod
+May  6 08:42:18.947: INFO: Controller my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035: Got expected result from replica 1 [my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035-78z7q]: "my-hostname-basic-d59fd753-6fda-11e9-a235-ba138c0d9035-78z7q", 1 of 1 required successes so far
+[AfterEach] [sig-apps] ReplicaSet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:42:18.949: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replicaset-2whjz" for this suite.
+May  6 08:42:24.972: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:25.118: INFO: namespace: e2e-tests-replicaset-2whjz, resource: bindings, ignored listing per whitelist
+May  6 08:42:25.149: INFO: namespace e2e-tests-replicaset-2whjz deletion completed in 6.193168056s
+
+• [SLOW TEST:16.457 seconds]
+[sig-apps] ReplicaSet
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should serve a basic image on each replica with a public image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
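+
+The ReplicaSet spec above creates one replica of a hostname-serving image and then dials it, expecting each replica to answer with its own pod name. A sketch of an equivalent manifest (the serve-hostname image and its port 9376 are assumptions based on the usual e2e test image, not read from this log):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: apps/v1
+kind: ReplicaSet
+metadata:
+  name: my-hostname-basic-demo
+spec:
+  replicas: 1
+  selector:
+    matchLabels: { app: my-hostname-basic-demo }
+  template:
+    metadata:
+      labels: { app: my-hostname-basic-demo }
+    spec:
+      containers:
+      - name: serve-hostname
+        image: k8s.gcr.io/serve_hostname:v1.4   # assumed test image: replies with its pod name over HTTP
+        ports:
+        - containerPort: 9376
+EOF
+
+kubectl get pods -l app=my-hostname-basic-demo -o wide
+# verify by curling <pod-ip>:9376 from inside the cluster; the response body is the pod name
+```
+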
+SSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:25.150: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name projected-secret-test-df6cf2ec-6fda-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:42:25.366: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-zcq7l" to be "success or failure"
+May  6 08:42:25.396: INFO: Pod "pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 30.066086ms
+May  6 08:42:27.401: INFO: Pod "pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.035326537s
+May  6 08:42:29.406: INFO: Pod "pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.039495467s
+STEP: Saw pod success
+May  6 08:42:29.406: INFO: Pod "pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:42:29.409: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035 container secret-volume-test: 
+STEP: delete the pod
+May  6 08:42:29.431: INFO: Waiting for pod pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:42:29.435: INFO: Pod pod-projected-secrets-df6dbe9c-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:42:29.436: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-zcq7l" for this suite.
+May  6 08:42:35.457: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:35.613: INFO: namespace: e2e-tests-projected-zcq7l, resource: bindings, ignored listing per whitelist
+May  6 08:42:35.632: INFO: namespace e2e-tests-projected-zcq7l deletion completed in 6.19216707s
+
+• [SLOW TEST:10.483 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
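+
+The projected-secret spec above exposes one Secret through two projected volumes in a single pod. A hand-run sketch (names are illustrative):
+
+```bash
+kubectl create secret generic demo-secret --from-literal=data-1=value-1
+
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: projected-secret-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: secret-volume-test
+    image: busybox
+    command: ["sh", "-c", "cat /etc/proj-a/data-1 /etc/proj-b/data-1"]
+    volumeMounts:
+    - { name: proj-a, mountPath: /etc/proj-a, readOnly: true }
+    - { name: proj-b, mountPath: /etc/proj-b, readOnly: true }
+  volumes:
+  - name: proj-a
+    projected:
+      sources:
+      - secret: { name: demo-secret }
+  - name: proj-b
+    projected:
+      sources:
+      - secret: { name: demo-secret }
+EOF
+```
+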
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:35.637: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+May  6 08:42:35.786: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+May  6 08:42:35.795: INFO: Waiting for terminating namespaces to be deleted...
+May  6 08:42:35.805: INFO: 
+Logging pods the kubelet thinks are on node kubernetes-cluster-2696-minion-0 before test
+May  6 08:42:35.815: INFO: calico-node-glwdg from kube-system started at 2019-05-06 07:08:43 +0000 UTC (2 container statuses recorded)
+May  6 08:42:35.816: INFO:  Container calico-node ready: true, restart count 0
+May  6 08:42:35.816: INFO:  Container install-cni ready: true, restart count 0
+May  6 08:42:35.816: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-06 07:26:42 +0000 UTC (1 container statuses recorded)
+May  6 08:42:35.816: INFO:  Container kube-sonobuoy ready: true, restart count 0
+May  6 08:42:35.816: INFO: sonobuoy-e2e-job-a8abc7b1c06240a4 from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:42:35.816: INFO:  Container e2e ready: true, restart count 0
+May  6 08:42:35.816: INFO:  Container sonobuoy-worker ready: true, restart count 0
+May  6 08:42:35.816: INFO: calico-kube-controllers-79f9487886-28vsz from kube-system started at 2019-05-06 07:08:55 +0000 UTC (1 container statuses recorded)
+May  6 08:42:35.816: INFO:  Container calico-kube-controllers ready: true, restart count 0
+May  6 08:42:35.817: INFO: sonobuoy-systemd-logs-daemon-set-03c53cfc64d4424c-sq9gg from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:42:35.817: INFO:  Container sonobuoy-systemd-logs-config ready: true, restart count 1
+May  6 08:42:35.818: INFO:  Container sonobuoy-worker ready: true, restart count 1
+[It] validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Trying to schedule Pod with nonempty NodeSelector.
+STEP: Considering event: 
+Type = [Warning], Name = [restricted-pod.159c0b721a3fdcb8], Reason = [FailedScheduling], Message = [0/2 nodes are available: 2 node(s) didn't match node selector.]
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:42:36.862: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-r48mk" for this suite.
+May  6 08:42:42.881: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:42.901: INFO: namespace: e2e-tests-sched-pred-r48mk, resource: bindings, ignored listing per whitelist
+May  6 08:42:43.031: INFO: namespace e2e-tests-sched-pred-r48mk deletion completed in 6.163237872s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:7.395 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if not matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
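+
+The scheduling spec above submits a pod whose nodeSelector matches no node and expects it to stay Pending with a FailedScheduling event, exactly as the recorded event shows. A sketch of the same situation (label key and pod name are made up; no node is expected to carry this label):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: restricted-pod-demo
+spec:
+  nodeSelector:
+    example.invalid/no-such-label: "42"
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.1
+EOF
+
+kubectl get pod restricted-pod-demo        # remains Pending
+kubectl describe pod restricted-pod-demo   # Events report that no node matched the node selector
+```
+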
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:43.037: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:42:43.235: INFO: pod1.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod3", UID:"ea121496-6fda-11e9-8e1b-fa163ee16beb", Controller:(*bool)(0xc00246af42), BlockOwnerDeletion:(*bool)(0xc00246af43)}}
+May  6 08:42:43.245: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"ea0fd5e3-6fda-11e9-8e1b-fa163ee16beb", Controller:(*bool)(0xc00246b1a6), BlockOwnerDeletion:(*bool)(0xc00246b1a7)}}
+May  6 08:42:43.251: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"ea10bf49-6fda-11e9-8e1b-fa163ee16beb", Controller:(*bool)(0xc001d6b016), BlockOwnerDeletion:(*bool)(0xc001d6b017)}}
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:42:48.275: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-dwr7z" for this suite.
+May  6 08:42:54.310: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:42:54.450: INFO: namespace: e2e-tests-gc-dwr7z, resource: bindings, ignored listing per whitelist
+May  6 08:42:54.462: INFO: namespace e2e-tests-gc-dwr7z deletion completed in 6.182014111s
+
+• [SLOW TEST:11.426 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should not be blocked by dependency circle [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
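+
+The garbage-collector spec above wires three pods into a circular ownerReferences chain and verifies the collector still makes progress. The two-pod sketch below shows only the underlying owner-reference cascade, not the full three-pod circle the test builds (names and image are illustrative):
+
+```bash
+kubectl run owner-pod --image=k8s.gcr.io/pause:3.1 --restart=Never
+OWNER_UID=$(kubectl get pod owner-pod -o jsonpath='{.metadata.uid}')
+
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: dependent-pod
+  ownerReferences:
+  - apiVersion: v1
+    kind: Pod
+    name: owner-pod
+    uid: ${OWNER_UID}
+spec:
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.1
+EOF
+
+# deleting the owner lets the garbage collector remove the dependent as well
+kubectl delete pod owner-pod
+kubectl get pod dependent-pod   # eventually reports NotFound
+```
+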
+SSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-api-machinery] Namespaces [Serial] 
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:42:54.469: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename namespaces
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a test namespace
+STEP: Waiting for a default service account to be provisioned in namespace
+STEP: Creating a service in the namespace
+STEP: Deleting the namespace
+STEP: Waiting for the namespace to be removed.
+STEP: Recreating the namespace
+STEP: Verifying there is no service in the namespace
+[AfterEach] [sig-api-machinery] Namespaces [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:00.789: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-namespaces-b9fwz" for this suite.
+May  6 08:43:06.830: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:06.879: INFO: namespace: e2e-tests-namespaces-b9fwz, resource: bindings, ignored listing per whitelist
+May  6 08:43:06.956: INFO: namespace e2e-tests-namespaces-b9fwz deletion completed in 6.159873594s
+STEP: Destroying namespace "e2e-tests-nsdeletetest-4j5pt" for this suite.
+May  6 08:43:06.961: INFO: Namespace e2e-tests-nsdeletetest-4j5pt was already deleted
+STEP: Destroying namespace "e2e-tests-nsdeletetest-5hc4z" for this suite.
+May  6 08:43:12.990: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:13.087: INFO: namespace: e2e-tests-nsdeletetest-5hc4z, resource: bindings, ignored listing per whitelist
+May  6 08:43:13.125: INFO: namespace e2e-tests-nsdeletetest-5hc4z deletion completed in 6.162880447s
+
+• [SLOW TEST:18.656 seconds]
+[sig-api-machinery] Namespaces [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should ensure that all services are removed when a namespace is deleted [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
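+
+The namespace spec above creates a Service, deletes the namespace, recreates it and checks that no Service survived. The same sequence by hand (names are illustrative; the delete may take a few seconds to finalize before the name can be reused):
+
+```bash
+kubectl create namespace svcdelete-demo
+kubectl create service clusterip demo-svc --tcp=80:80 --namespace=svcdelete-demo
+
+kubectl delete namespace svcdelete-demo
+
+# a namespace recreated under the same name starts out empty
+kubectl create namespace svcdelete-demo
+kubectl get services --namespace=svcdelete-demo
+```
+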
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:13.130: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:43:13.300: INFO: Waiting up to 5m0s for pod "downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-dzgl7" to be "success or failure"
+May  6 08:43:13.315: INFO: Pod "downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 13.030084ms
+May  6 08:43:15.319: INFO: Pod "downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.017227222s
+May  6 08:43:17.335: INFO: Pod "downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.033474376s
+STEP: Saw pod success
+May  6 08:43:17.336: INFO: Pod "downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:43:17.346: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:43:17.381: INFO: Waiting for pod downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035 to disappear
+May  6 08:43:17.385: INFO: Pod downwardapi-volume-fbff1096-6fda-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:17.385: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-dzgl7" for this suite.
+May  6 08:43:23.404: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:23.547: INFO: namespace: e2e-tests-projected-dzgl7, resource: bindings, ignored listing per whitelist
+May  6 08:43:23.570: INFO: namespace e2e-tests-projected-dzgl7 deletion completed in 6.180730618s
+
+• [SLOW TEST:10.441 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
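+
+The downward-API spec above mounts the container's CPU request into a file through a projected volume and reads it back. A sketch of that plumbing (names, sizes and the 1m divisor are illustrative choices; requests.memory, limits.cpu and limits.memory are exposed the same way):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downwardapi-cpu-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["sh", "-c", "cat /etc/podinfo/cpu_request"]
+    resources:
+      requests:
+        cpu: 250m
+        memory: 32Mi
+    volumeMounts:
+    - name: podinfo
+      mountPath: /etc/podinfo
+  volumes:
+  - name: podinfo
+    projected:
+      sources:
+      - downwardAPI:
+          items:
+          - path: cpu_request
+            resourceFieldRef:
+              containerName: client-container
+              resource: requests.cpu
+              divisor: 1m
+EOF
+
+kubectl logs downwardapi-cpu-demo   # prints 250 (millicores, because of the 1m divisor)
+```
+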
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run job 
+  should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:23.571: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1454
+[It] should create a job from an image when restart is OnFailure  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May  6 08:43:23.703: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-ldhbg'
+May  6 08:43:24.503: INFO: stderr: "kubectl run --generator=job/v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May  6 08:43:24.503: INFO: stdout: "job.batch/e2e-test-nginx-job created\n"
+STEP: verifying the job e2e-test-nginx-job was created
+[AfterEach] [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1459
+May  6 08:43:24.538: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete jobs e2e-test-nginx-job --namespace=e2e-tests-kubectl-ldhbg'
+May  6 08:43:24.696: INFO: stderr: ""
+May  6 08:43:24.696: INFO: stdout: "job.batch \"e2e-test-nginx-job\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:24.696: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-ldhbg" for this suite.
+May  6 08:43:30.719: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:30.843: INFO: namespace: e2e-tests-kubectl-ldhbg, resource: bindings, ignored listing per whitelist
+May  6 08:43:30.843: INFO: namespace e2e-tests-kubectl-ldhbg deletion completed in 6.136852836s
+
+• [SLOW TEST:7.273 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run job
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create a job from an image when restart is OnFailure  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
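+
+The kubectl spec above uses the job generator that 1.13-era kubectl already flags as deprecated, then deletes the Job it produced. The same commands without the suite's --kubeconfig/--namespace flags; the kubectl create job form is the replacement hinted at by the deprecation warning (assumption: it requires a kubectl newer than 1.13):
+
+```bash
+kubectl run e2e-test-nginx-job --restart=OnFailure --generator=job/v1 \
+    --image=docker.io/library/nginx:1.14-alpine
+
+kubectl get jobs e2e-test-nginx-job
+kubectl delete jobs e2e-test-nginx-job
+
+# newer kubectl releases spell the same thing as:
+# kubectl create job e2e-test-nginx-job --image=docker.io/library/nginx:1.14-alpine
+```
+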
+SSSSSS
+------------------------------
+[sig-api-machinery] Garbage collector 
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:30.847: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename gc
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the deployment
+STEP: Wait for the Deployment to create new ReplicaSet
+STEP: delete the deployment
+STEP: wait for all rs to be garbage collected
+STEP: expected 0 pods, got 2 pods
+STEP: expected 0 rs, got 1 rs
+STEP: Gathering metrics
+W0506 08:43:31.612759      14 metrics_grabber.go:81] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
+May  6 08:43:31.613: INFO: For apiserver_request_count:
+For apiserver_request_latencies_summary:
+For etcd_helper_cache_entry_count:
+For etcd_helper_cache_hit_count:
+For etcd_helper_cache_miss_count:
+For etcd_request_cache_add_latencies_summary:
+For etcd_request_cache_get_latencies_summary:
+For etcd_request_latencies_summary:
+For garbage_collector_attempt_to_delete_queue_latency:
+For garbage_collector_attempt_to_delete_work_duration:
+For garbage_collector_attempt_to_orphan_queue_latency:
+For garbage_collector_attempt_to_orphan_work_duration:
+For garbage_collector_dirty_processing_latency_microseconds:
+For garbage_collector_event_processing_latency_microseconds:
+For garbage_collector_graph_changes_queue_latency:
+For garbage_collector_graph_changes_work_duration:
+For garbage_collector_orphan_processing_latency_microseconds:
+For namespace_queue_latency:
+For namespace_queue_latency_sum:
+For namespace_queue_latency_count:
+For namespace_retries:
+For namespace_work_duration:
+For namespace_work_duration_sum:
+For namespace_work_duration_count:
+For function_duration_seconds:
+For errors_total:
+For evicted_pods_total:
+
+[AfterEach] [sig-api-machinery] Garbage collector
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:31.613: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-gc-md8c8" for this suite.
+May  6 08:43:37.630: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:37.665: INFO: namespace: e2e-tests-gc-md8c8, resource: bindings, ignored listing per whitelist
+May  6 08:43:37.797: INFO: namespace e2e-tests-gc-md8c8 deletion completed in 6.178267969s
+
+• [SLOW TEST:6.950 seconds]
+[sig-api-machinery] Garbage collector
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  should delete RS created by deployment when not orphaning [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
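+
+The garbage-collector spec above deletes a Deployment without orphaning and expects its ReplicaSet (and pods) to be collected too. The default kubectl delete behaves the same way (deployment name is illustrative; 1.13-era kubectl spells orphaning as --cascade=false):
+
+```bash
+kubectl create deployment gc-demo --image=docker.io/library/nginx:1.14-alpine
+kubectl get replicasets -l app=gc-demo        # the Deployment-owned ReplicaSet
+
+# a plain delete cascades: the ReplicaSet and its pods are garbage collected as well
+kubectl delete deployment gc-demo
+kubectl get replicasets,pods -l app=gc-demo   # eventually empty
+
+# for contrast, `kubectl delete deployment gc-demo --cascade=false` would orphan the ReplicaSet
+```
+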
+SSSSSSS
+------------------------------
+[sig-storage] Projected downwardAPI 
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:37.800: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:39
+[It] should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:43:37.936: INFO: Waiting up to 5m0s for pod "downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-tskzv" to be "success or failure"
+May  6 08:43:37.956: INFO: Pod "downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 19.750871ms
+May  6 08:43:39.964: INFO: Pod "downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.027405787s
+STEP: Saw pod success
+May  6 08:43:39.964: INFO: Pod "downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:43:39.970: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:43:40.009: INFO: Waiting for pod downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:43:40.012: INFO: Pod downwardapi-volume-0aad898e-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected downwardAPI
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:40.012: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-tskzv" for this suite.
+May  6 08:43:46.039: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:46.076: INFO: namespace: e2e-tests-projected-tskzv, resource: bindings, ignored listing per whitelist
+May  6 08:43:46.206: INFO: namespace e2e-tests-projected-tskzv deletion completed in 6.188892911s
+
+• [SLOW TEST:8.407 seconds]
+[sig-storage] Projected downwardAPI
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_downwardapi.go:33
+  should provide container's memory request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
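+
+The spec above is the memory-request counterpart of the CPU-request volume test earlier in this log. As a complementary sketch, the same resourceFieldRef can also be surfaced as an environment variable instead of a file (names are illustrative; with the default divisor the value is reported in bytes):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: downwardapi-mem-env-demo
+spec:
+  restartPolicy: Never
+  containers:
+  - name: client-container
+    image: busybox
+    command: ["sh", "-c", "echo MEMORY_REQUEST=$MEMORY_REQUEST"]
+    resources:
+      requests:
+        memory: 32Mi
+    env:
+    - name: MEMORY_REQUEST
+      valueFrom:
+        resourceFieldRef:
+          containerName: client-container
+          resource: requests.memory
+EOF
+
+kubectl logs downwardapi-mem-env-demo   # MEMORY_REQUEST=33554432 (32Mi in bytes)
+```
+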
+SSSSSSSSSSSS
+------------------------------
+[sig-apps] ReplicationController 
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:46.207: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename replication-controller
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Given a ReplicationController is created
+STEP: When the matched label of one of its pods change
+May  6 08:43:46.398: INFO: Pod name pod-release: Found 0 pods out of 1
+May  6 08:43:51.404: INFO: Pod name pod-release: Found 1 pods out of 1
+STEP: Then the pod is released
+[AfterEach] [sig-apps] ReplicationController
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:43:52.421: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-replication-controller-dvwhf" for this suite.
+May  6 08:43:58.439: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:43:58.580: INFO: namespace: e2e-tests-replication-controller-dvwhf, resource: bindings, ignored listing per whitelist
+May  6 08:43:58.598: INFO: namespace e2e-tests-replication-controller-dvwhf deletion completed in 6.171677557s
+
+• [SLOW TEST:12.392 seconds]
+[sig-apps] ReplicationController
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should release no longer matching pods [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
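+
+The ReplicationController spec above relabels one of the controller's pods so it no longer matches the selector; the pod is "released" (its controller reference is dropped) and the RC creates a replacement. A hand-run sketch (names and image are illustrative):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: pod-release-demo
+spec:
+  replicas: 1
+  selector:
+    name: pod-release-demo
+  template:
+    metadata:
+      labels:
+        name: pod-release-demo
+    spec:
+      containers:
+      - name: pause
+        image: k8s.gcr.io/pause:3.1
+EOF
+
+POD=$(kubectl get pods -l name=pod-release-demo -o jsonpath='{.items[0].metadata.name}')
+
+# change the matched label: the pod is released and the RC spins up a replacement
+kubectl label pod "$POD" name=released --overwrite
+kubectl get pods -L name
+```
+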
+[k8s.io] Probing container 
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:43:58.600: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod liveness-http in namespace e2e-tests-container-probe-qgwzl
+May  6 08:44:00.782: INFO: Started pod liveness-http in namespace e2e-tests-container-probe-qgwzl
+STEP: checking the pod's current state and verifying that restartCount is present
+May  6 08:44:00.787: INFO: Initial restart count of pod liveness-http is 0
+May  6 08:44:26.860: INFO: Restart count of pod e2e-tests-container-probe-qgwzl/liveness-http is now 1 (26.073089623s elapsed)
+STEP: deleting the pod
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:44:26.873: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-qgwzl" for this suite.
+May  6 08:44:32.898: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:44:32.939: INFO: namespace: e2e-tests-container-probe-qgwzl, resource: bindings, ignored listing per whitelist
+May  6 08:44:33.053: INFO: namespace e2e-tests-container-probe-qgwzl deletion completed in 6.175937994s
+
+• [SLOW TEST:34.455 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
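+
+The probe spec above expects the kubelet to restart a container once its /healthz liveness probe starts failing, which is the restart-count bump recorded in the log. A sketch of such a pod (assumption: k8s.gcr.io/liveness is the documentation test image that serves /healthz on 8080 and starts returning 500 after roughly ten seconds):
+
+```bash
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: liveness-http-demo
+spec:
+  containers:
+  - name: liveness
+    image: k8s.gcr.io/liveness
+    args: ["/server"]
+    livenessProbe:
+      httpGet:
+        path: /healthz
+        port: 8080
+      initialDelaySeconds: 3
+      periodSeconds: 3
+EOF
+
+# once the probe fails, the kubelet restarts the container and RESTARTS climbs above 0
+kubectl get pod liveness-http-demo -w
+```
+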
+SSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:44:33.054: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+May  6 08:44:33.210: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+May  6 08:44:33.218: INFO: Waiting for terminating namespaces to be deleted...
+May  6 08:44:33.224: INFO: 
+Logging pods the kubelet thinks are on node kubernetes-cluster-2696-minion-0 before test
+May  6 08:44:33.235: INFO: sonobuoy-e2e-job-a8abc7b1c06240a4 from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:44:33.236: INFO:  Container e2e ready: true, restart count 0
+May  6 08:44:33.236: INFO:  Container sonobuoy-worker ready: true, restart count 0
+May  6 08:44:33.236: INFO: calico-kube-controllers-79f9487886-28vsz from kube-system started at 2019-05-06 07:08:55 +0000 UTC (1 container statuses recorded)
+May  6 08:44:33.236: INFO:  Container calico-kube-controllers ready: true, restart count 0
+May  6 08:44:33.236: INFO: sonobuoy-systemd-logs-daemon-set-03c53cfc64d4424c-sq9gg from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:44:33.236: INFO:  Container sonobuoy-systemd-logs-config ready: true, restart count 1
+May  6 08:44:33.236: INFO:  Container sonobuoy-worker ready: true, restart count 1
+May  6 08:44:33.236: INFO: calico-node-glwdg from kube-system started at 2019-05-06 07:08:43 +0000 UTC (2 container statuses recorded)
+May  6 08:44:33.238: INFO:  Container calico-node ready: true, restart count 0
+May  6 08:44:33.238: INFO:  Container install-cni ready: true, restart count 0
+May  6 08:44:33.238: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-06 07:26:42 +0000 UTC (1 container statuses recorded)
+May  6 08:44:33.238: INFO:  Container kube-sonobuoy ready: true, restart count 0
+[It] validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Trying to launch a pod without a label to get a node which can launch it.
+STEP: Explicitly delete pod here to free the resource it takes.
+STEP: Trying to apply a random label on the found node.
+STEP: verifying the node has the label kubernetes.io/e2e-2cde38b4-6fdb-11e9-a235-ba138c0d9035 42
+STEP: Trying to relaunch the pod, now with labels.
+STEP: removing the label kubernetes.io/e2e-2cde38b4-6fdb-11e9-a235-ba138c0d9035 off the node kubernetes-cluster-2696-minion-0
+STEP: verifying the node doesn't have the label kubernetes.io/e2e-2cde38b4-6fdb-11e9-a235-ba138c0d9035
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:44:37.397: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-xf6wv" for this suite.
+May  6 08:44:45.425: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:44:45.515: INFO: namespace: e2e-tests-sched-pred-xf6wv, resource: bindings, ignored listing per whitelist
+May  6 08:44:45.582: INFO: namespace e2e-tests-sched-pred-xf6wv deletion completed in 8.177408169s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:12.529 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates that NodeSelector is respected if matching  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
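+
+This spec is the matching-selector counterpart of the earlier NodeSelector test: it labels a node, then schedules a pod whose nodeSelector requires that label. By hand it looks like this (the label key is made up; any schedulable node name can stand in for the one taken from this log):
+
+```bash
+NODE=kubernetes-cluster-2696-minion-0
+kubectl label node "$NODE" example.invalid/e2e-demo=42
+
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: with-labels-demo
+spec:
+  nodeSelector:
+    example.invalid/e2e-demo: "42"
+  containers:
+  - name: pause
+    image: k8s.gcr.io/pause:3.1
+EOF
+
+kubectl get pod with-labels-demo -o wide                # scheduled onto the labelled node
+kubectl label node "$NODE" example.invalid/e2e-demo-    # remove the label afterwards
+```
+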
+SS
+------------------------------
+[sig-apps] Deployment 
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:44:45.583: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:44:45.751: INFO: Creating deployment "test-recreate-deployment"
+May  6 08:44:45.757: INFO: Waiting deployment "test-recreate-deployment" to be updated to revision 1
+May  6 08:44:45.775: INFO: deployment "test-recreate-deployment" doesn't have the required revision set
+May  6 08:44:47.786: INFO: Waiting deployment "test-recreate-deployment" to complete
+May  6 08:44:47.791: INFO: Triggering a new rollout for deployment "test-recreate-deployment"
+May  6 08:44:47.803: INFO: Updating deployment test-recreate-deployment
+May  6 08:44:47.803: INFO: Watching deployment "test-recreate-deployment" to verify that new pods will not run with old pods
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May  6 08:44:47.932: INFO: Deployment "test-recreate-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment,GenerateName:,Namespace:e2e-tests-deployment-zshmn,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zshmn/deployments/test-recreate-deployment,UID:331d042a-6fdb-11e9-8e1b-fa163ee16beb,ResourceVersion:22471,Generation:2,CreationTimestamp:2019-05-06 08:44:45 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:Recreate,RollingUpdate:nil,},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:2,Replicas:1,UpdatedReplicas:1,AvailableReplicas:0,UnavailableReplicas:1,Conditions:[{Available False 2019-05-06 08:44:47 +0000 UTC 2019-05-06 08:44:47 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-05-06 08:44:47 +0000 UTC 2019-05-06 08:44:45 +0000 UTC ReplicaSetUpdated ReplicaSet "test-recreate-deployment-697fbf54bf" is progressing.}],ReadyReplicas:0,CollisionCount:nil,},}
+
+May  6 08:44:47.936: INFO: New ReplicaSet "test-recreate-deployment-697fbf54bf" of Deployment "test-recreate-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf,GenerateName:,Namespace:e2e-tests-deployment-zshmn,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zshmn/replicasets/test-recreate-deployment-697fbf54bf,UID:345ceb20-6fdb-11e9-8e1b-fa163ee16beb,ResourceVersion:22470,Generation:1,CreationTimestamp:2019-05-06 08:44:47 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 331d042a-6fdb-11e9-8e1b-fa163ee16beb 0xc00244d567 0xc00244d568}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*1,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:1,FullyLabeledReplicas:1,ObservedGeneration:1,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May  6 08:44:47.936: INFO: All old ReplicaSets of Deployment "test-recreate-deployment":
+May  6 08:44:47.936: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-5dfdcc846d,GenerateName:,Namespace:e2e-tests-deployment-zshmn,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-zshmn/replicasets/test-recreate-deployment-5dfdcc846d,UID:331d8a01-6fdb-11e9-8e1b-fa163ee16beb,ResourceVersion:22460,Generation:2,CreationTimestamp:2019-05-06 08:44:45 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 1,deployment.kubernetes.io/max-replicas: 1,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment test-recreate-deployment 331d042a-6fdb-11e9-8e1b-fa163ee16beb 0xc00244d437 0xc00244d438}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*0,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 5dfdcc846d,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{redis gcr.io/kubernetes-e2e-test-images/redis:1.0 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:0,FullyLabeledReplicas:0,ObservedGeneration:2,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May  6 08:44:47.940: INFO: Pod "test-recreate-deployment-697fbf54bf-w8m6b" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:test-recreate-deployment-697fbf54bf-w8m6b,GenerateName:test-recreate-deployment-697fbf54bf-,Namespace:e2e-tests-deployment-zshmn,SelfLink:/api/v1/namespaces/e2e-tests-deployment-zshmn/pods/test-recreate-deployment-697fbf54bf-w8m6b,UID:345dbc74-6fdb-11e9-8e1b-fa163ee16beb,ResourceVersion:22472,Generation:0,CreationTimestamp:2019-05-06 08:44:47 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: sample-pod-3,pod-template-hash: 697fbf54bf,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet test-recreate-deployment-697fbf54bf 345ceb20-6fdb-11e9-8e1b-fa163ee16beb 0xc00244def7 0xc00244def8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-fp9b2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-fp9b2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-fp9b2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:44:47 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:44:47 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:44:47 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:44:47 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:44:47 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:44:47.940: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-zshmn" for this suite.
+May  6 08:44:53.962: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:44:54.056: INFO: namespace: e2e-tests-deployment-zshmn, resource: bindings, ignored listing per whitelist
+May  6 08:44:54.108: INFO: namespace e2e-tests-deployment-zshmn deletion completed in 6.164889082s
+
+• [SLOW TEST:8.525 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  RecreateDeployment should delete old pods and create new ones [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSS
+------------------------------
+[sig-node] ConfigMap 
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:44:54.113: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap e2e-tests-configmap-kmfht/configmap-test-3828d69a-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:44:54.239: INFO: Waiting up to 5m0s for pod "pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-kmfht" to be "success or failure"
+May  6 08:44:54.244: INFO: Pod "pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 5.057446ms
+May  6 08:44:56.262: INFO: Pod "pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.022981891s
+STEP: Saw pod success
+May  6 08:44:56.262: INFO: Pod "pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:44:56.271: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035 container env-test: 
+STEP: delete the pod
+May  6 08:44:56.299: INFO: Waiting for pod pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:44:56.304: INFO: Pod pod-configmaps-382a2a0d-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-node] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:44:56.304: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-kmfht" for this suite.
+May  6 08:45:02.322: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:45:02.540: INFO: namespace: e2e-tests-configmap-kmfht, resource: bindings, ignored listing per whitelist
+May  6 08:45:02.576: INFO: namespace e2e-tests-configmap-kmfht deletion completed in 6.267425206s
+
+• [SLOW TEST:8.464 seconds]
+[sig-node] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap.go:31
+  should be consumable via environment variable [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl run default 
+  should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:45:02.577: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1262
+[It] should create an rc or deployment from an image  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: running the image docker.io/library/nginx:1.14-alpine
+May  6 08:45:02.810: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 run e2e-test-nginx-deployment --image=docker.io/library/nginx:1.14-alpine --namespace=e2e-tests-kubectl-jr8k8'
+May  6 08:45:02.979: INFO: stderr: "kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.\n"
+May  6 08:45:02.979: INFO: stdout: "deployment.apps/e2e-test-nginx-deployment created\n"
+STEP: verifying the pod controlled by e2e-test-nginx-deployment gets created
+[AfterEach] [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:1268
+May  6 08:45:03.013: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 delete deployment e2e-test-nginx-deployment --namespace=e2e-tests-kubectl-jr8k8'
+May  6 08:45:03.178: INFO: stderr: ""
+May  6 08:45:03.178: INFO: stdout: "deployment.extensions \"e2e-test-nginx-deployment\" deleted\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:45:03.178: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-jr8k8" for this suite.
+May  6 08:45:25.211: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:45:25.262: INFO: namespace: e2e-tests-kubectl-jr8k8, resource: bindings, ignored listing per whitelist
+May  6 08:45:25.362: INFO: namespace e2e-tests-kubectl-jr8k8 deletion completed in 22.17695048s
+
+• [SLOW TEST:22.786 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl run default
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should create an rc or deployment from an image  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[k8s.io] Pods 
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:45:25.364: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:45:27.559: INFO: Waiting up to 5m0s for pod "client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-pods-7wfhg" to be "success or failure"
+May  6 08:45:27.567: INFO: Pod "client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.681042ms
+May  6 08:45:29.577: INFO: Pod "client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017587062s
+STEP: Saw pod success
+May  6 08:45:29.577: INFO: Pod "client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:45:29.581: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035 container env3cont: 
+STEP: delete the pod
+May  6 08:45:29.628: INFO: Waiting for pod client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:45:29.639: INFO: Pod client-envvars-4c058d1f-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:45:29.639: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-7wfhg" for this suite.
+May  6 08:46:07.658: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:46:07.700: INFO: namespace: e2e-tests-pods-7wfhg, resource: bindings, ignored listing per whitelist
+May  6 08:46:07.820: INFO: namespace e2e-tests-pods-7wfhg deletion completed in 38.176200381s
+
+• [SLOW TEST:42.456 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should contain environment variables for services [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected combined 
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected combined
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:46:07.820: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-projected-all-test-volume-641840c7-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating secret with name secret-projected-all-test-volume-641840a8-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test Check all projections for projected volume plugin
+May  6 08:46:07.950: INFO: Waiting up to 5m0s for pod "projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-znnvg" to be "success or failure"
+May  6 08:46:07.956: INFO: Pod "projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 5.684691ms
+May  6 08:46:09.960: INFO: Pod "projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.009907914s
+May  6 08:46:11.967: INFO: Pod "projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.016060013s
+STEP: Saw pod success
+May  6 08:46:11.967: INFO: Pod "projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:46:11.971: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035 container projected-all-volume-test: 
+STEP: delete the pod
+May  6 08:46:12.010: INFO: Waiting for pod projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:46:12.019: INFO: Pod projected-volume-64184050-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected combined
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:46:12.020: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-znnvg" for this suite.
+May  6 08:46:18.058: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:46:18.140: INFO: namespace: e2e-tests-projected-znnvg, resource: bindings, ignored listing per whitelist
+May  6 08:46:18.213: INFO: namespace e2e-tests-projected-znnvg deletion completed in 6.187204929s
+
+• [SLOW TEST:10.393 seconds]
+[sig-storage] Projected combined
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_combined.go:31
+  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] EmptyDir wrapper volumes 
+  should not conflict [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:46:18.215: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir-wrapper
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should not conflict [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Cleaning up the secret
+STEP: Cleaning up the configmap
+STEP: Cleaning up the pod
+[AfterEach] [sig-storage] EmptyDir wrapper volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:46:20.472: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-wrapper-zbppw" for this suite.
+May  6 08:46:26.491: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:46:26.652: INFO: namespace: e2e-tests-emptydir-wrapper-zbppw, resource: bindings, ignored listing per whitelist
+May  6 08:46:26.660: INFO: namespace e2e-tests-emptydir-wrapper-zbppw deletion completed in 6.183915955s
+
+• [SLOW TEST:8.446 seconds]
+[sig-storage] EmptyDir wrapper volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  should not conflict [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] HostPath 
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:46:26.661: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename hostpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:37
+[It] should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test hostPath mode
+May  6 08:46:26.772: INFO: Waiting up to 5m0s for pod "pod-host-path-test" in namespace "e2e-tests-hostpath-4lfqm" to be "success or failure"
+May  6 08:46:26.777: INFO: Pod "pod-host-path-test": Phase="Pending", Reason="", readiness=false. Elapsed: 4.790793ms
+May  6 08:46:28.784: INFO: Pod "pod-host-path-test": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012050319s
+STEP: Saw pod success
+May  6 08:46:28.784: INFO: Pod "pod-host-path-test" satisfied condition "success or failure"
+May  6 08:46:28.788: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-host-path-test container test-container-1: 
+STEP: delete the pod
+May  6 08:46:28.812: INFO: Waiting for pod pod-host-path-test to disappear
+May  6 08:46:28.816: INFO: Pod pod-host-path-test no longer exists
+[AfterEach] [sig-storage] HostPath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:46:28.817: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-hostpath-4lfqm" for this suite.
+May  6 08:46:34.831: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:46:34.943: INFO: namespace: e2e-tests-hostpath-4lfqm, resource: bindings, ignored listing per whitelist
+May  6 08:46:34.977: INFO: namespace e2e-tests-hostpath-4lfqm deletion completed in 6.15771247s
+
+• [SLOW TEST:8.317 seconds]
+[sig-storage] HostPath
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/host_path.go:34
+  should give a volume the correct mode [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:46:34.979: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-map-744bfb9e-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:46:35.128: INFO: Waiting up to 5m0s for pod "pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-5dd6d" to be "success or failure"
+May  6 08:46:35.142: INFO: Pod "pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 14.137737ms
+May  6 08:46:37.164: INFO: Pod "pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.036230201s
+STEP: Saw pod success
+May  6 08:46:37.165: INFO: Pod "pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:46:37.168: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035 container secret-volume-test: 
+STEP: delete the pod
+May  6 08:46:37.194: INFO: Waiting for pod pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:46:37.201: INFO: Pod pod-secrets-744ca1dd-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:46:37.201: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-5dd6d" for this suite.
+May  6 08:46:43.222: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:46:43.354: INFO: namespace: e2e-tests-secrets-5dd6d, resource: bindings, ignored listing per whitelist
+May  6 08:46:43.354: INFO: namespace e2e-tests-secrets-5dd6d deletion completed in 6.147055238s
+
+• [SLOW TEST:8.376 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl patch 
+  should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:46:43.356: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should add annotations for pods in rc  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating Redis RC
+May  6 08:46:43.475: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-r2wrl'
+May  6 08:46:43.744: INFO: stderr: ""
+May  6 08:46:43.744: INFO: stdout: "replicationcontroller/redis-master created\n"
+STEP: Waiting for Redis master to start.
+May  6 08:46:44.760: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:46:44.760: INFO: Found 0 / 1
+May  6 08:46:45.750: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:46:45.750: INFO: Found 0 / 1
+May  6 08:46:46.749: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:46:46.749: INFO: Found 1 / 1
+May  6 08:46:46.749: INFO: WaitFor completed with timeout 5m0s.  Pods found = 1 out of 1
+STEP: patching all pods
+May  6 08:46:46.753: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:46:46.753: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+May  6 08:46:46.753: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 patch pod redis-master-mmkcr --namespace=e2e-tests-kubectl-r2wrl -p {"metadata":{"annotations":{"x":"y"}}}'
+May  6 08:46:46.880: INFO: stderr: ""
+May  6 08:46:46.880: INFO: stdout: "pod/redis-master-mmkcr patched\n"
+STEP: checking annotations
+May  6 08:46:46.885: INFO: Selector matched 1 pods for map[app:redis]
+May  6 08:46:46.885: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:46:46.885: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-r2wrl" for this suite.
+May  6 08:47:08.903: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:47:08.965: INFO: namespace: e2e-tests-kubectl-r2wrl, resource: bindings, ignored listing per whitelist
+May  6 08:47:09.055: INFO: namespace e2e-tests-kubectl-r2wrl deletion completed in 22.165327968s
+
+• [SLOW TEST:25.699 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl patch
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should add annotations for pods in rc  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-scheduling] SchedulerPredicates [Serial] 
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:47:09.057: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename sched-pred
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:79
+May  6 08:47:09.216: INFO: Waiting up to 1m0s for all (but 0) nodes to be ready
+May  6 08:47:09.226: INFO: Waiting for terminating namespaces to be deleted...
+May  6 08:47:09.229: INFO: 
+Logging pods the kubelet thinks is on node kubernetes-cluster-2696-minion-0 before test
+May  6 08:47:09.245: INFO: calico-node-glwdg from kube-system started at 2019-05-06 07:08:43 +0000 UTC (2 container statuses recorded)
+May  6 08:47:09.246: INFO:  Container calico-node ready: true, restart count 0
+May  6 08:47:09.246: INFO:  Container install-cni ready: true, restart count 0
+May  6 08:47:09.246: INFO: sonobuoy from heptio-sonobuoy started at 2019-05-06 07:26:42 +0000 UTC (1 container statuses recorded)
+May  6 08:47:09.246: INFO:  Container kube-sonobuoy ready: true, restart count 0
+May  6 08:47:09.246: INFO: sonobuoy-e2e-job-a8abc7b1c06240a4 from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:47:09.246: INFO:  Container e2e ready: true, restart count 0
+May  6 08:47:09.246: INFO:  Container sonobuoy-worker ready: true, restart count 0
+May  6 08:47:09.246: INFO: calico-kube-controllers-79f9487886-28vsz from kube-system started at 2019-05-06 07:08:55 +0000 UTC (1 container statuses recorded)
+May  6 08:47:09.246: INFO:  Container calico-kube-controllers ready: true, restart count 0
+May  6 08:47:09.246: INFO: sonobuoy-systemd-logs-daemon-set-03c53cfc64d4424c-sq9gg from heptio-sonobuoy started at 2019-05-06 07:26:49 +0000 UTC (2 container statuses recorded)
+May  6 08:47:09.246: INFO:  Container sonobuoy-systemd-logs-config ready: true, restart count 1
+May  6 08:47:09.246: INFO:  Container sonobuoy-worker ready: true, restart count 1
+[It] validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: verifying the node has the label node kubernetes-cluster-2696-minion-0
+May  6 08:47:09.279: INFO: Pod sonobuoy requesting resource cpu=0m on Node kubernetes-cluster-2696-minion-0
+May  6 08:47:09.279: INFO: Pod sonobuoy-e2e-job-a8abc7b1c06240a4 requesting resource cpu=0m on Node kubernetes-cluster-2696-minion-0
+May  6 08:47:09.279: INFO: Pod sonobuoy-systemd-logs-daemon-set-03c53cfc64d4424c-sq9gg requesting resource cpu=0m on Node kubernetes-cluster-2696-minion-0
+May  6 08:47:09.279: INFO: Pod calico-kube-controllers-79f9487886-28vsz requesting resource cpu=0m on Node kubernetes-cluster-2696-minion-0
+May  6 08:47:09.279: INFO: Pod calico-node-glwdg requesting resource cpu=250m on Node kubernetes-cluster-2696-minion-0
+STEP: Starting Pods to consume most of the cluster CPU.
+STEP: Creating another pod that requires unavailable amount of CPU.
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-88a8d692-6fdb-11e9-a235-ba138c0d9035.159c0bb1c43deb12], Reason = [Scheduled], Message = [Successfully assigned e2e-tests-sched-pred-9x55p/filler-pod-88a8d692-6fdb-11e9-a235-ba138c0d9035 to kubernetes-cluster-2696-minion-0]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-88a8d692-6fdb-11e9-a235-ba138c0d9035.159c0bb20430ae45], Reason = [Pulled], Message = [Container image "k8s.gcr.io/pause:3.1" already present on machine]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-88a8d692-6fdb-11e9-a235-ba138c0d9035.159c0bb20b7b9b64], Reason = [Created], Message = [Created container]
+STEP: Considering event: 
+Type = [Normal], Name = [filler-pod-88a8d692-6fdb-11e9-a235-ba138c0d9035.159c0bb214d3d397], Reason = [Started], Message = [Started container]
+STEP: Considering event: 
+Type = [Warning], Name = [additional-pod.159c0bb2b3deb9f7], Reason = [FailedScheduling], Message = [0/2 nodes are available: 1 Insufficient cpu, 1 node(s) had taints that the pod didn't tolerate.]
+STEP: removing the label node off the node kubernetes-cluster-2696-minion-0
+STEP: verifying the node doesn't have the label node
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:47:14.340: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-sched-pred-9x55p" for this suite.
+May  6 08:47:20.361: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:47:20.448: INFO: namespace: e2e-tests-sched-pred-9x55p, resource: bindings, ignored listing per whitelist
+May  6 08:47:20.517: INFO: namespace e2e-tests-sched-pred-9x55p deletion completed in 6.170940934s
+[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/predicates.go:70
+
+• [SLOW TEST:11.461 seconds]
+[sig-scheduling] SchedulerPredicates [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/scheduling/framework.go:22
+  validates resource limits of pods that are allowed to run  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Update Demo 
+  should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:47:20.521: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[BeforeEach] [k8s.io] Update Demo
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:295
+[It] should do a rolling update of a replication controller  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the initial replication controller
+May  6 08:47:20.682: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 create -f - --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:20.961: INFO: stderr: ""
+May  6 08:47:20.961: INFO: stdout: "replicationcontroller/update-demo-nautilus created\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May  6 08:47:20.961: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:21.133: INFO: stderr: ""
+May  6 08:47:21.133: INFO: stdout: "update-demo-nautilus-2zl7h update-demo-nautilus-zjs98 "
+May  6 08:47:21.133: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2zl7h -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:21.252: INFO: stderr: ""
+May  6 08:47:21.252: INFO: stdout: ""
+May  6 08:47:21.252: INFO: update-demo-nautilus-2zl7h is created but not running
+May  6 08:47:26.253: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:26.366: INFO: stderr: ""
+May  6 08:47:26.366: INFO: stdout: "update-demo-nautilus-2zl7h update-demo-nautilus-zjs98 "
+May  6 08:47:26.366: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2zl7h -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:26.493: INFO: stderr: ""
+May  6 08:47:26.493: INFO: stdout: "true"
+May  6 08:47:26.493: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-2zl7h -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:26.623: INFO: stderr: ""
+May  6 08:47:26.623: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May  6 08:47:26.623: INFO: validating pod update-demo-nautilus-2zl7h
+May  6 08:47:26.631: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May  6 08:47:26.631: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May  6 08:47:26.631: INFO: update-demo-nautilus-2zl7h is verified up and running
+May  6 08:47:26.631: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-zjs98 -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:26.748: INFO: stderr: ""
+May  6 08:47:26.748: INFO: stdout: "true"
+May  6 08:47:26.748: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-nautilus-zjs98 -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:26.888: INFO: stderr: ""
+May  6 08:47:26.888: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/nautilus:1.0"
+May  6 08:47:26.888: INFO: validating pod update-demo-nautilus-zjs98
+May  6 08:47:26.894: INFO: got data: {
+  "image": "nautilus.jpg"
+}
+
+May  6 08:47:26.894: INFO: Unmarshalled json jpg/img => {nautilus.jpg} , expecting nautilus.jpg .
+May  6 08:47:26.895: INFO: update-demo-nautilus-zjs98 is verified up and running
+STEP: rolling-update to new replication controller
+May  6 08:47:26.898: INFO: scanned /root for discovery docs: 
+May  6 08:47:26.898: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 rolling-update update-demo-nautilus --update-period=1s -f - --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:50.557: INFO: stderr: "Command \"rolling-update\" is deprecated, use \"rollout\" instead\n"
+May  6 08:47:50.557: INFO: stdout: "Created update-demo-kitten\nScaling up update-demo-kitten from 0 to 2, scaling down update-demo-nautilus from 2 to 0 (keep 2 pods available, don't exceed 3 pods)\nScaling update-demo-kitten up to 1\nScaling update-demo-nautilus down to 1\nScaling update-demo-kitten up to 2\nScaling update-demo-nautilus down to 0\nUpdate succeeded. Deleting old controller: update-demo-nautilus\nRenaming update-demo-kitten to update-demo-nautilus\nreplicationcontroller/update-demo-nautilus rolling updated\n"
+STEP: waiting for all containers in name=update-demo pods to come up.
+May  6 08:47:50.557: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods -o template --template={{range.items}}{{.metadata.name}} {{end}} -l name=update-demo --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:50.686: INFO: stderr: ""
+May  6 08:47:50.686: INFO: stdout: "update-demo-kitten-bg55k update-demo-kitten-z9nrg "
+May  6 08:47:50.686: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-kitten-bg55k -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:50.803: INFO: stderr: ""
+May  6 08:47:50.803: INFO: stdout: "true"
+May  6 08:47:50.803: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-kitten-bg55k -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:50.924: INFO: stderr: ""
+May  6 08:47:50.924: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+May  6 08:47:50.924: INFO: validating pod update-demo-kitten-bg55k
+May  6 08:47:50.930: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+May  6 08:47:50.930: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg .
+May  6 08:47:50.930: INFO: update-demo-kitten-bg55k is verified up and running
+May  6 08:47:50.930: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-kitten-z9nrg -o template --template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "update-demo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:51.053: INFO: stderr: ""
+May  6 08:47:51.053: INFO: stdout: "true"
+May  6 08:47:51.053: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 get pods update-demo-kitten-z9nrg -o template --template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "update-demo"}}{{.image}}{{end}}{{end}}{{end}} --namespace=e2e-tests-kubectl-tdhmc'
+May  6 08:47:51.179: INFO: stderr: ""
+May  6 08:47:51.179: INFO: stdout: "gcr.io/kubernetes-e2e-test-images/kitten:1.0"
+May  6 08:47:51.179: INFO: validating pod update-demo-kitten-z9nrg
+May  6 08:47:51.184: INFO: got data: {
+  "image": "kitten.jpg"
+}
+
+May  6 08:47:51.184: INFO: Unmarshalled json jpg/img => {kitten.jpg} , expecting kitten.jpg .
+May  6 08:47:51.184: INFO: update-demo-kitten-z9nrg is verified up and running
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:47:51.184: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-tdhmc" for this suite.
+May  6 08:48:13.206: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:48:13.248: INFO: namespace: e2e-tests-kubectl-tdhmc, resource: bindings, ignored listing per whitelist
+May  6 08:48:13.365: INFO: namespace e2e-tests-kubectl-tdhmc deletion completed in 22.176418651s
+
+• [SLOW TEST:52.844 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Update Demo
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should do a rolling update of a replication controller  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Docker Containers 
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:48:13.366: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename containers
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test override arguments
+May  6 08:48:13.491: INFO: Waiting up to 5m0s for pod "client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-containers-g69cs" to be "success or failure"
+May  6 08:48:13.497: INFO: Pod "client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.162613ms
+May  6 08:48:15.508: INFO: Pod "client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.017628623s
+STEP: Saw pod success
+May  6 08:48:15.509: INFO: Pod "client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:48:15.511: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 08:48:15.538: INFO: Waiting for pod client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:48:15.544: INFO: Pod client-containers-aeec6e79-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [k8s.io] Docker Containers
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:48:15.545: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-containers-g69cs" for this suite.
+May  6 08:48:21.568: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:48:21.637: INFO: namespace: e2e-tests-containers-g69cs, resource: bindings, ignored listing per whitelist
+May  6 08:48:21.678: INFO: namespace e2e-tests-containers-g69cs deletion completed in 6.128966776s
+
+• [SLOW TEST:8.313 seconds]
+[k8s.io] Docker Containers
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:48:21.680: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:48:21.806: INFO: Waiting up to 5m0s for pod "downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-7s558" to be "success or failure"
+May  6 08:48:21.822: INFO: Pod "downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 15.795669ms
+May  6 08:48:23.830: INFO: Pod "downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.02397091s
+STEP: Saw pod success
+May  6 08:48:23.830: INFO: Pod "downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:48:23.837: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:48:23.861: INFO: Waiting for pod downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:48:23.868: INFO: Pod downwardapi-volume-b3e263f3-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:48:23.868: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-7s558" for this suite.
+May  6 08:48:29.892: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:48:29.924: INFO: namespace: e2e-tests-downward-api-7s558, resource: bindings, ignored listing per whitelist
+May  6 08:48:30.009: INFO: namespace e2e-tests-downward-api-7s558 deletion completed in 6.135450343s
+
+• [SLOW TEST:8.328 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu request [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-network] DNS 
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] DNS
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:48:30.009: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename dns
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Running these commands on wheezy: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-wrr99.svc.cluster.local)" && echo OK > /results/wheezy_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-wrr99.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/wheezy_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-wrr99.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/wheezy_tcp@PodARecord;sleep 1; done
+
+STEP: Running these commands on jessie: for i in `seq 1 600`; do check="$$(dig +notcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default;check="$$(dig +tcp +noall +answer +search kubernetes.default A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc;check="$$(dig +notcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_udp@kubernetes.default.svc.cluster.local;check="$$(dig +tcp +noall +answer +search kubernetes.default.svc.cluster.local A)" && test -n "$$check" && echo OK > /results/jessie_tcp@kubernetes.default.svc.cluster.local;test -n "$$(getent hosts dns-querier-1.dns-test-service.e2e-tests-dns-wrr99.svc.cluster.local)" && echo OK > /results/jessie_hosts@dns-querier-1.dns-test-service.e2e-tests-dns-wrr99.svc.cluster.local;test -n "$$(getent hosts dns-querier-1)" && echo OK > /results/jessie_hosts@dns-querier-1;podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".e2e-tests-dns-wrr99.pod.cluster.local"}');check="$$(dig +notcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_udp@PodARecord;check="$$(dig +tcp +noall +answer +search $${podARec} A)" && test -n "$$check" && echo OK > /results/jessie_tcp@PodARecord;sleep 1; done
+
+STEP: creating a pod to probe DNS
+STEP: submitting the pod to kubernetes
+STEP: retrieving the pod
+STEP: looking for the results for each expected name from probers
+May  6 08:48:34.344: INFO: DNS probes using e2e-tests-dns-wrr99/dns-test-b8deab44-6fdb-11e9-a235-ba138c0d9035 succeeded
+
+STEP: deleting the pod
+[AfterEach] [sig-network] DNS
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:48:34.367: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-dns-wrr99" for this suite.
+May  6 08:48:40.397: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:48:40.439: INFO: namespace: e2e-tests-dns-wrr99, resource: bindings, ignored listing per whitelist
+May  6 08:48:40.563: INFO: namespace e2e-tests-dns-wrr99 deletion completed in 6.191704853s
+
+• [SLOW TEST:10.554 seconds]
+[sig-network] DNS
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should provide DNS for the cluster  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSS
+------------------------------
+[sig-apps] Daemon set [Serial] 
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:48:40.564: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename daemonsets
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:102
+[It] should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating simple DaemonSet "daemon-set"
+STEP: Check that daemon pods launch on every node of the cluster.
+May  6 08:48:40.715: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:40.726: INFO: Number of nodes with available pods: 0
+May  6 08:48:40.726: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:41.751: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:41.755: INFO: Number of nodes with available pods: 0
+May  6 08:48:41.755: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:42.734: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:42.740: INFO: Number of nodes with available pods: 1
+May  6 08:48:42.740: INFO: Number of running nodes: 1, number of available pods: 1
+STEP: Stop a daemon pod, check that the daemon pod is revived.
+May  6 08:48:42.761: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:42.780: INFO: Number of nodes with available pods: 0
+May  6 08:48:42.780: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:43.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:43.789: INFO: Number of nodes with available pods: 0
+May  6 08:48:43.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:44.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:44.789: INFO: Number of nodes with available pods: 0
+May  6 08:48:44.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:45.788: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:45.795: INFO: Number of nodes with available pods: 0
+May  6 08:48:45.795: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:46.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:46.790: INFO: Number of nodes with available pods: 0
+May  6 08:48:46.790: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:47.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:47.788: INFO: Number of nodes with available pods: 0
+May  6 08:48:47.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:48.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:48.789: INFO: Number of nodes with available pods: 0
+May  6 08:48:48.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:49.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:49.789: INFO: Number of nodes with available pods: 0
+May  6 08:48:49.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:50.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:50.788: INFO: Number of nodes with available pods: 0
+May  6 08:48:50.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:52.154: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:52.234: INFO: Number of nodes with available pods: 0
+May  6 08:48:52.234: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:52.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:52.788: INFO: Number of nodes with available pods: 0
+May  6 08:48:52.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:53.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:53.789: INFO: Number of nodes with available pods: 0
+May  6 08:48:53.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:54.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:54.788: INFO: Number of nodes with available pods: 0
+May  6 08:48:54.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:55.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:55.788: INFO: Number of nodes with available pods: 0
+May  6 08:48:55.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:56.787: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:56.791: INFO: Number of nodes with available pods: 0
+May  6 08:48:56.791: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:57.787: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:57.800: INFO: Number of nodes with available pods: 0
+May  6 08:48:57.800: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:58.787: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:58.791: INFO: Number of nodes with available pods: 0
+May  6 08:48:58.791: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:48:59.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:48:59.794: INFO: Number of nodes with available pods: 0
+May  6 08:48:59.794: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:00.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:00.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:00.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:01.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:01.790: INFO: Number of nodes with available pods: 0
+May  6 08:49:01.791: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:02.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:02.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:02.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:03.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:03.793: INFO: Number of nodes with available pods: 0
+May  6 08:49:03.793: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:04.797: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:04.802: INFO: Number of nodes with available pods: 0
+May  6 08:49:04.802: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:05.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:05.789: INFO: Number of nodes with available pods: 0
+May  6 08:49:05.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:06.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:06.789: INFO: Number of nodes with available pods: 0
+May  6 08:49:06.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:07.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:07.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:07.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:08.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:08.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:08.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:09.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:09.790: INFO: Number of nodes with available pods: 0
+May  6 08:49:09.790: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:10.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:10.790: INFO: Number of nodes with available pods: 0
+May  6 08:49:10.790: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:11.786: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:11.789: INFO: Number of nodes with available pods: 0
+May  6 08:49:11.790: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:12.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:12.789: INFO: Number of nodes with available pods: 0
+May  6 08:49:12.789: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:13.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:13.787: INFO: Number of nodes with available pods: 0
+May  6 08:49:13.787: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:14.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:14.787: INFO: Number of nodes with available pods: 0
+May  6 08:49:14.787: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:15.787: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:15.791: INFO: Number of nodes with available pods: 0
+May  6 08:49:15.791: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:16.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:16.787: INFO: Number of nodes with available pods: 0
+May  6 08:49:16.787: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:17.784: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:17.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:17.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:18.798: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:18.805: INFO: Number of nodes with available pods: 0
+May  6 08:49:18.805: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:19.785: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:19.788: INFO: Number of nodes with available pods: 0
+May  6 08:49:19.788: INFO: Node kubernetes-cluster-2696-minion-0 is running more than one daemon pod
+May  6 08:49:20.788: INFO: DaemonSet pods can't tolerate node kubernetes-cluster-2696-master-0 with taints [{Key:CriticalAddonsOnly Value:True Effect:NoSchedule TimeAdded:} {Key:dedicated Value:master Effect:NoSchedule TimeAdded:}], skip checking this node
+May  6 08:49:20.793: INFO: Number of nodes with available pods: 1
+May  6 08:49:20.793: INFO: Number of running nodes: 1, number of available pods: 1
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/daemon_set.go:68
+STEP: Deleting DaemonSet "daemon-set"
+STEP: deleting DaemonSet.extensions daemon-set in namespace e2e-tests-daemonsets-fd822, will wait for the garbage collector to delete the pods
+May  6 08:49:20.862: INFO: Deleting DaemonSet.extensions daemon-set took: 11.607567ms
+May  6 08:49:20.962: INFO: Terminating DaemonSet.extensions daemon-set pods took: 100.524884ms
+May  6 08:49:54.766: INFO: Number of nodes with available pods: 0
+May  6 08:49:54.766: INFO: Number of running nodes: 0, number of available pods: 0
+May  6 08:49:54.769: INFO: daemonset: {"kind":"DaemonSetList","apiVersion":"apps/v1","metadata":{"selfLink":"/apis/apps/v1/namespaces/e2e-tests-daemonsets-fd822/daemonsets","resourceVersion":"23779"},"items":null}
+
+May  6 08:49:54.771: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/e2e-tests-daemonsets-fd822/pods","resourceVersion":"23779"},"items":null}
+
+[AfterEach] [sig-apps] Daemon set [Serial]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:49:54.777: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-daemonsets-fd822" for this suite.
+May  6 08:50:00.794: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:00.903: INFO: namespace: e2e-tests-daemonsets-fd822, resource: bindings, ignored listing per whitelist
+May  6 08:50:00.938: INFO: namespace e2e-tests-daemonsets-fd822 deletion completed in 6.157692444s
+
+• [SLOW TEST:80.375 seconds]
+[sig-apps] Daemon set [Serial]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  should run and stop simple daemon [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:00.939: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-ef12469c-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:50:01.127: INFO: Waiting up to 5m0s for pod "pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-dhqtq" to be "success or failure"
+May  6 08:50:01.150: INFO: Pod "pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 23.061675ms
+May  6 08:50:03.154: INFO: Pod "pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.026932222s
+STEP: Saw pod success
+May  6 08:50:03.154: INFO: Pod "pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:50:03.158: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035 container configmap-volume-test: 
+STEP: delete the pod
+May  6 08:50:03.185: INFO: Waiting for pod pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:50:03.188: INFO: Pod pod-configmaps-ef13fb50-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:50:03.188: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-dhqtq" for this suite.
+May  6 08:50:09.207: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:09.261: INFO: namespace: e2e-tests-configmap-dhqtq, resource: bindings, ignored listing per whitelist
+May  6 08:50:09.357: INFO: namespace e2e-tests-configmap-dhqtq deletion completed in 6.164902027s
+
+• [SLOW TEST:8.417 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:09.357: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May  6 08:50:09.523: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:50:13.303: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-tvd6z" for this suite.
+May  6 08:50:19.322: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:19.408: INFO: namespace: e2e-tests-init-container-tvd6z, resource: bindings, ignored listing per whitelist
+May  6 08:50:19.465: INFO: namespace e2e-tests-init-container-tvd6z deletion completed in 6.157320289s
+
+• [SLOW TEST:10.107 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should not start app containers and fail the pod if init containers fail on a RestartNever pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:19.466: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-fa172dbe-6fdb-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:50:19.603: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-gwv6p" to be "success or failure"
+May  6 08:50:19.614: INFO: Pod "pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 9.924034ms
+May  6 08:50:21.620: INFO: Pod "pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016685146s
+STEP: Saw pod success
+May  6 08:50:21.620: INFO: Pod "pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:50:21.625: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: 
+STEP: delete the pod
+May  6 08:50:21.652: INFO: Waiting for pod pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:50:21.660: INFO: Pod pod-projected-configmaps-fa17ed95-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:50:21.660: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-gwv6p" for this suite.
+May  6 08:50:27.685: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:27.852: INFO: namespace: e2e-tests-projected-gwv6p, resource: bindings, ignored listing per whitelist
+May  6 08:50:27.889: INFO: namespace e2e-tests-projected-gwv6p deletion completed in 6.224953643s
+
+• [SLOW TEST:8.423 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:27.889: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 08:50:28.150: INFO: Waiting up to 5m0s for pod "downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-dn4qr" to be "success or failure"
+May  6 08:50:28.171: INFO: Pod "downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 20.564525ms
+May  6 08:50:30.182: INFO: Pod "downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.031942342s
+May  6 08:50:32.201: INFO: Pod "downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.051277092s
+STEP: Saw pod success
+May  6 08:50:32.202: INFO: Pod "downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:50:32.218: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 08:50:32.293: INFO: Waiting for pod downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035 to disappear
+May  6 08:50:32.312: INFO: Pod downwardapi-volume-ff30614f-6fdb-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:50:32.312: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-dn4qr" for this suite.
+May  6 08:50:38.361: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:38.440: INFO: namespace: e2e-tests-downward-api-dn4qr, resource: bindings, ignored listing per whitelist
+May  6 08:50:38.475: INFO: namespace e2e-tests-downward-api-dn4qr deletion completed in 6.146278022s
+
+• [SLOW TEST:10.586 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should provide container's cpu limit [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:38.478: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-map-056cbdcf-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:50:38.615: INFO: Waiting up to 5m0s for pod "pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-xstml" to be "success or failure"
+May  6 08:50:38.623: INFO: Pod "pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.221468ms
+May  6 08:50:40.629: INFO: Pod "pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.014161831s
+May  6 08:50:42.635: INFO: Pod "pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019780555s
+STEP: Saw pod success
+May  6 08:50:42.635: INFO: Pod "pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:50:42.642: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035 container secret-volume-test: 
+STEP: delete the pod
+May  6 08:50:42.665: INFO: Waiting for pod pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:50:42.668: INFO: Pod pod-secrets-056dad0f-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:50:42.668: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-xstml" for this suite.
+May  6 08:50:48.691: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:50:48.778: INFO: namespace: e2e-tests-secrets-xstml, resource: bindings, ignored listing per whitelist
+May  6 08:50:48.844: INFO: namespace e2e-tests-secrets-xstml deletion completed in 6.173178466s
+
+• [SLOW TEST:10.367 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-network] Networking Granular Checks: Pods 
+  should function for node-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Networking
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:50:48.845: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename pod-network-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should function for node-pod communication: udp [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Performing setup for networking test in namespace e2e-tests-pod-network-test-k7clh
+STEP: creating a selector
+STEP: Creating the service pods in kubernetes
+May  6 08:50:49.031: INFO: Waiting up to 10m0s for all (but 0) nodes to be schedulable
+STEP: Creating test pods
+May  6 08:51:13.133: INFO: ExecWithOptions {Command:[/bin/sh -c echo 'hostName' | nc -w 1 -u 10.100.112.93 8081 | grep -v '^\s*$'] Namespace:e2e-tests-pod-network-test-k7clh PodName:host-test-container-pod ContainerName:hostexec Stdin: CaptureStdout:true CaptureStderr:true PreserveWhitespace:false}
+May  6 08:51:13.135: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+May  6 08:51:14.339: INFO: Found all expected endpoints: [netserver-0]
+[AfterEach] [sig-network] Networking
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:51:14.339: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pod-network-test-k7clh" for this suite.
+May  6 08:51:36.364: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:51:36.446: INFO: namespace: e2e-tests-pod-network-test-k7clh, resource: bindings, ignored listing per whitelist
+May  6 08:51:36.494: INFO: namespace e2e-tests-pod-network-test-k7clh deletion completed in 22.150904728s
+
+• [SLOW TEST:47.650 seconds]
+[sig-network] Networking
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:25
+  Granular Checks: Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/networking.go:28
+    should function for node-pod communication: udp [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:51:36.496: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-upd-28042969-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating the pod
+STEP: Updating configmap configmap-test-upd-28042969-6fdc-11e9-a235-ba138c0d9035
+STEP: waiting to observe update in volume
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:51:40.729: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-6zzfz" for this suite.
+May  6 08:52:02.751: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:52:02.799: INFO: namespace: e2e-tests-configmap-6zzfz, resource: bindings, ignored listing per whitelist
+May  6 08:52:02.884: INFO: namespace e2e-tests-configmap-6zzfz deletion completed in 22.150240793s
+
+• [SLOW TEST:26.388 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  updates should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[k8s.io] Variable Expansion 
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:52:02.887: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename var-expansion
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test env composition
+May  6 08:52:03.071: INFO: Waiting up to 5m0s for pod "var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-var-expansion-5dmld" to be "success or failure"
+May  6 08:52:03.079: INFO: Pod "var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.607325ms
+May  6 08:52:05.095: INFO: Pod "var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.023266722s
+STEP: Saw pod success
+May  6 08:52:05.095: INFO: Pod "var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:52:05.098: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035 container dapi-container: 
+STEP: delete the pod
+May  6 08:52:05.122: INFO: Waiting for pod var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:52:05.126: INFO: Pod var-expansion-37c5101d-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [k8s.io] Variable Expansion
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:52:05.126: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-var-expansion-5dmld" for this suite.
+May  6 08:52:11.153: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:52:11.233: INFO: namespace: e2e-tests-var-expansion-5dmld, resource: bindings, ignored listing per whitelist
+May  6 08:52:11.285: INFO: namespace e2e-tests-var-expansion-5dmld deletion completed in 6.153955449s
+
+• [SLOW TEST:8.399 seconds]
+[k8s.io] Variable Expansion
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should allow composing env vars into new env vars [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSS
+------------------------------
+[sig-apps] Deployment 
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:52:11.286: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename deployment
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:65
+[It] deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:52:11.392: INFO: Creating deployment "nginx-deployment"
+May  6 08:52:11.398: INFO: Waiting for observed generation 1
+May  6 08:52:13.412: INFO: Waiting for all required pods to come up
+May  6 08:52:13.418: INFO: Pod name nginx: Found 10 pods out of 10
+STEP: ensuring each pod is running
+May  6 08:52:19.437: INFO: Waiting for deployment "nginx-deployment" to complete
+May  6 08:52:19.445: INFO: Updating deployment "nginx-deployment" with a non-existent image
+May  6 08:52:19.456: INFO: Updating deployment nginx-deployment
+May  6 08:52:19.456: INFO: Waiting for observed generation 2
+May  6 08:52:21.517: INFO: Waiting for the first rollout's replicaset to have .status.availableReplicas = 8
+May  6 08:52:21.521: INFO: Waiting for the first rollout's replicaset to have .spec.replicas = 8
+May  6 08:52:21.525: INFO: Waiting for the first rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+May  6 08:52:21.548: INFO: Verifying that the second rollout's replicaset has .status.availableReplicas = 0
+May  6 08:52:21.549: INFO: Waiting for the second rollout's replicaset to have .spec.replicas = 5
+May  6 08:52:21.553: INFO: Waiting for the second rollout's replicaset of deployment "nginx-deployment" to have desired number of replicas
+May  6 08:52:21.566: INFO: Verifying that deployment "nginx-deployment" has minimum required number of available replicas
+May  6 08:52:21.566: INFO: Scaling up the deployment "nginx-deployment" from 10 to 30
+May  6 08:52:21.582: INFO: Updating deployment nginx-deployment
+May  6 08:52:21.582: INFO: Waiting for the replicasets of deployment "nginx-deployment" to have desired number of replicas
+May  6 08:52:21.611: INFO: Verifying that first rollout's replicaset has .spec.replicas = 20
+May  6 08:52:23.650: INFO: Verifying that second rollout's replicaset has .spec.replicas = 13
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/deployment.go:59
+May  6 08:52:23.660: INFO: Deployment "nginx-deployment":
+&Deployment{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment,GenerateName:,Namespace:e2e-tests-deployment-qh258,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qh258/deployments/nginx-deployment,UID:3cbc14a2-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24704,Generation:3,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{deployment.kubernetes.io/revision: 2,},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:DeploymentSpec{Replicas:*30,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},Strategy:DeploymentStrategy{Type:RollingUpdate,RollingUpdate:&RollingUpdateDeployment{MaxUnavailable:2,MaxSurge:3,},},MinReadySeconds:0,RevisionHistoryLimit:*10,Paused:false,ProgressDeadlineSeconds:*600,},Status:DeploymentStatus{ObservedGeneration:3,Replicas:33,UpdatedReplicas:13,AvailableReplicas:8,UnavailableReplicas:25,Conditions:[{Available False 2019-05-06 08:52:21 +0000 UTC 2019-05-06 08:52:21 +0000 UTC MinimumReplicasUnavailable Deployment does not have minimum availability.} {Progressing True 2019-05-06 08:52:21 +0000 UTC 2019-05-06 08:52:11 +0000 UTC ReplicaSetUpdated ReplicaSet "nginx-deployment-65bbdb5f8" is progressing.}],ReadyReplicas:8,CollisionCount:nil,},}
+
+May  6 08:52:23.667: INFO: New ReplicaSet "nginx-deployment-65bbdb5f8" of Deployment "nginx-deployment":
+&ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8,GenerateName:,Namespace:e2e-tests-deployment-qh258,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qh258/replicasets/nginx-deployment-65bbdb5f8,UID:418a5874-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24700,Generation:3,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 2,},OwnerReferences:[{apps/v1 Deployment nginx-deployment 3cbc14a2-6fdc-11e9-8e1b-fa163ee16beb 0xc0023da687 0xc0023da688}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*13,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:13,FullyLabeledReplicas:13,ObservedGeneration:3,ReadyReplicas:0,AvailableReplicas:0,Conditions:[],},}
+May  6 08:52:23.667: INFO: All old ReplicaSets of Deployment "nginx-deployment":
+May  6 08:52:23.667: INFO: &ReplicaSet{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965,GenerateName:,Namespace:e2e-tests-deployment-qh258,SelfLink:/apis/apps/v1/namespaces/e2e-tests-deployment-qh258/replicasets/nginx-deployment-555b55d965,UID:3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24682,Generation:3,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{deployment.kubernetes.io/desired-replicas: 30,deployment.kubernetes.io/max-replicas: 33,deployment.kubernetes.io/revision: 1,},OwnerReferences:[{apps/v1 Deployment nginx-deployment 3cbc14a2-6fdc-11e9-8e1b-fa163ee16beb 0xc0023da5c7 0xc0023da5c8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:ReplicaSetSpec{Replicas:*20,Selector:&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{MatchLabels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},MatchExpressions:[],},Template:k8s_io_api_core_v1.PodTemplateSpec{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:,GenerateName:,Namespace:,SelfLink:,UID:,ResourceVersion:,Generation:0,CreationTimestamp:0001-01-01 00:00:00 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:,DeprecatedServiceAccount:,NodeName:,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:nil,},},MinReadySeconds:0,},Status:ReplicaSetStatus{Replicas:20,FullyLabeledReplicas:20,ObservedGeneration:3,ReadyReplicas:8,AvailableReplicas:8,Conditions:[],},}
+May  6 08:52:23.679: INFO: Pod "nginx-deployment-555b55d965-49xnr" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-49xnr,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-49xnr,UID:42cfde1d-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24673,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db1c7 0xc0023db1c8}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.679: INFO: Pod "nginx-deployment-555b55d965-4kt98" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-4kt98,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-4kt98,UID:3cc741db-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24530,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db340 0xc0023db341}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.83,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://247827ddfe14856d54544ea990d9d363ebae9bb6618c703c9523392734b671c6}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.679: INFO: Pod "nginx-deployment-555b55d965-6wdrl" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-6wdrl,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-6wdrl,UID:42d2f750-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24728,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db440 0xc0023db441}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.679: INFO: Pod "nginx-deployment-555b55d965-74pzz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-74pzz,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-74pzz,UID:42d6258a-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24744,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db5e0 0xc0023db5e1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-djqkz" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-djqkz,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-djqkz,UID:3cc69e9d-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24527,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db6d0 0xc0023db6d1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.96,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://89dc430c490855ea87e3928a42e8fb11c1643a2dbf3ef80fc7410d47bad3002b}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-f5tmz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-f5tmz,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-f5tmz,UID:42dbf067-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24676,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db920 0xc0023db921}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-fs55h" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-fs55h,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-fs55h,UID:42d64f4b-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24666,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023db9d0 0xc0023db9d1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-gcfdg" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-gcfdg,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-gcfdg,UID:3cc36e2b-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24538,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023dbbc0 0xc0023dbbc1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.89,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://4be954cdd261ae7ef23833caf2a60cb1cf34907bcb728e3bc8b38259cfbdb894}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-gnz9n" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-gnz9n,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-gnz9n,UID:3cc0158d-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24488,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023dbcc0 0xc0023dbcc1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:15 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:15 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.85,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:15 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://2b541333e6e8f21b84cf2530efff50e004ff21a796bf9a4d8f04fed86220f2cd}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-gth7s" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-gth7s,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-gth7s,UID:3cc14070-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24533,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc0023dbf00 0xc0023dbf01}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.88,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://fa83ececf107c9a3edcd2eaa0f51ebfedd68fbed2311e0a69feeec2b58a7afd3}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.680: INFO: Pod "nginx-deployment-555b55d965-kn42b" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-kn42b,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-kn42b,UID:42dbfff6-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24675,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08040 0xc001d08041}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-mrxr5" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-mrxr5,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-mrxr5,UID:42d37549-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24703,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08160 0xc001d08161}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-pmrmr" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-pmrmr,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-pmrmr,UID:42daf1b9-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24671,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08280 0xc001d08281}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-psxn7" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-psxn7,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-psxn7,UID:3cc1a395-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24479,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d083a0 0xc001d083a1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:15 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:15 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.91,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:15 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://1f9d37c37c0d84f456e199676fe45fe99ed4a6c3b2a81d7e08cf2f6c1db78c20}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-qnklx" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-qnklx,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-qnklx,UID:42d6546a-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24741,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d084a0 0xc001d084a1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 docker.io/library/nginx:1.14-alpine  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-qvj47" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-qvj47,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-qvj47,UID:42d68206-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24655,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d085e0 0xc001d085e1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-r5wfx" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-r5wfx,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-r5wfx,UID:42dc1267-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24674,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08690 0xc001d08691}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.681: INFO: Pod "nginx-deployment-555b55d965-vlnn6" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-vlnn6,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-vlnn6,UID:3cc3f5ac-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24521,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08740 0xc001d08741}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.84,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://9603bb2393f0f09ac3f87db0df555db242253e579664def315f82fa25019dd4c}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-555b55d965-z2fjj" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-z2fjj,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-z2fjj,UID:42dbd223-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24677,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08850 0xc001d08851}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-555b55d965-zhxl2" is available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-555b55d965-zhxl2,GenerateName:nginx-deployment-555b55d965-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-555b55d965-zhxl2,UID:3cc75e2c-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24549,Generation:0,CreationTimestamp:2019-05-06 08:52:11 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 555b55d965,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-555b55d965 3cbcd96c-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08900 0xc001d08901}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx docker.io/library/nginx:1.14-alpine [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  } {Ready True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {ContainersReady True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:17 +0000 UTC  } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:11 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:10.100.112.94,StartTime:2019-05-06 08:52:11 +0000 UTC,ContainerStatuses:[{nginx {nil ContainerStateRunning{StartedAt:2019-05-06 08:52:16 +0000 UTC,} nil} {nil nil nil} true 0 nginx:1.14-alpine docker-pullable://nginx@sha256:485b610fefec7ff6c463ced9623314a04ed67e3945b9c08d7e53a47f6d108dc7 docker://d44b7ba38870d0f8d942d743f4e6fe406144f87f37573afe52102b0805d8ae8a}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-2j99r" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-2j99r,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-2j99r,UID:42d5ba60-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24739,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d08c80 0xc001d08c81}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:21 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-4dbgf" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-4dbgf,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-4dbgf,UID:42d85800-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24672,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09560 0xc001d09561}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-4nftg" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-4nftg,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-4nftg,UID:418da6f1-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24594,Generation:0,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09620 0xc001d09621}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:19 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-672ks" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-672ks,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-672ks,UID:42dda21a-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24685,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d097b0 0xc001d097b1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-6t8bz" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-6t8bz,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-6t8bz,UID:42d713af-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24667,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09870 0xc001d09871}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-7scw2" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-7scw2,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-7scw2,UID:418df7a5-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24604,Generation:0,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d099b0 0xc001d099b1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:19 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.682: INFO: Pod "nginx-deployment-65bbdb5f8-bcml4" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-bcml4,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-bcml4,UID:42ddf8d9-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24687,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09ac0 0xc001d09ac1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.683: INFO: Pod "nginx-deployment-65bbdb5f8-f8zgq" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-f8zgq,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-f8zgq,UID:419cf84f-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24611,Generation:0,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09b80 0xc001d09b81}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:19 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.683: INFO: Pod "nginx-deployment-65bbdb5f8-gjqw9" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-gjqw9,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-gjqw9,UID:418b7d70-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24573,Generation:0,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09c90 0xc001d09c91}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:19 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.683: INFO: Pod "nginx-deployment-65bbdb5f8-hl5lh" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-hl5lh,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-hl5lh,UID:419e8d1f-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24614,Generation:0,CreationTimestamp:2019-05-06 08:52:19 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09da0 0xc001d09da1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC ContainersNotReady containers with unready status: [nginx]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:19 +0000 UTC  }],Message:,Reason:,HostIP:10.0.0.19,PodIP:,StartTime:2019-05-06 08:52:19 +0000 UTC,ContainerStatuses:[{nginx {ContainerStateWaiting{Reason:ContainerCreating,Message:,} nil nil} {nil nil nil} false 0 nginx:404  }],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.685: INFO: Pod "nginx-deployment-65bbdb5f8-p4fc4" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-p4fc4,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-p4fc4,UID:42dd443d-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24683,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09eb0 0xc001d09eb1}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.685: INFO: Pod "nginx-deployment-65bbdb5f8-qvx9m" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-qvx9m,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-qvx9m,UID:42e45a2b-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24693,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc001d09f70 0xc001d09f71}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+May  6 08:52:23.685: INFO: Pod "nginx-deployment-65bbdb5f8-wj5zd" is not available:
+&Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:nginx-deployment-65bbdb5f8-wj5zd,GenerateName:nginx-deployment-65bbdb5f8-,Namespace:e2e-tests-deployment-qh258,SelfLink:/api/v1/namespaces/e2e-tests-deployment-qh258/pods/nginx-deployment-65bbdb5f8-wj5zd,UID:42dd4be8-6fdc-11e9-8e1b-fa163ee16beb,ResourceVersion:24689,Generation:0,CreationTimestamp:2019-05-06 08:52:21 +0000 UTC,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{name: nginx,pod-template-hash: 65bbdb5f8,},Annotations:map[string]string{},OwnerReferences:[{apps/v1 ReplicaSet nginx-deployment-65bbdb5f8 418a5874-6fdc-11e9-8e1b-fa163ee16beb 0xc000a68060 0xc000a68061}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{default-token-r8cs2 {nil nil nil nil nil SecretVolumeSource{SecretName:default-token-r8cs2,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{nginx nginx:404 [] []  [] [] [] {map[] map[]} [{default-token-r8cs2 true /var/run/secrets/kubernetes.io/serviceaccount  }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:kubernetes-cluster-2696-minion-0,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[],HostAliases:[],PriorityClassName:,Priority:nil,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],RuntimeClassName:nil,EnableServiceLinks:*true,},Status:PodStatus{Phase:Pending,Conditions:[{PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-06 08:52:21 +0000 UTC  }],Message:,Reason:,HostIP:,PodIP:,StartTime:,ContainerStatuses:[],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
+[AfterEach] [sig-apps] Deployment
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:52:23.685: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-deployment-qh258" for this suite.
+May  6 08:52:31.719: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:52:31.942: INFO: namespace: e2e-tests-deployment-qh258, resource: bindings, ignored listing per whitelist
+May  6 08:52:32.143: INFO: namespace e2e-tests-deployment-qh258 deletion completed in 8.450204469s
+
+• [SLOW TEST:20.858 seconds]
+[sig-apps] Deployment
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apps/framework.go:22
+  deployment should support proportional scaling [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:52:32.143: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-493c5641-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:52:32.378: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-tbh8r" to be "success or failure"
+May  6 08:52:32.397: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 18.476425ms
+May  6 08:52:34.402: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.023614624s
+May  6 08:52:36.439: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 4.061202849s
+May  6 08:52:38.445: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.066392183s
+May  6 08:52:40.456: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.07752535s
+May  6 08:52:42.461: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 10.082989575s
+STEP: Saw pod success
+May  6 08:52:42.461: INFO: Pod "pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:52:42.465: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035 container projected-secret-volume-test: 
+STEP: delete the pod
+May  6 08:52:42.499: INFO: Waiting for pod pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:52:42.503: INFO: Pod pod-projected-secrets-493cf46f-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:52:42.503: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-tbh8r" for this suite.
+May  6 08:52:48.524: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:52:48.596: INFO: namespace: e2e-tests-projected-tbh8r, resource: bindings, ignored listing per whitelist
+May  6 08:52:48.649: INFO: namespace e2e-tests-projected-tbh8r deletion completed in 6.142001401s
+
+• [SLOW TEST:16.506 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:52:48.652: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May  6 08:52:48.811: INFO: PodSpec: initContainers in spec.initContainers
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:52:52.776: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-q4z4j" for this suite.
+May  6 08:53:14.800: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:53:14.826: INFO: namespace: e2e-tests-init-container-q4z4j, resource: bindings, ignored listing per whitelist
+May  6 08:53:14.937: INFO: namespace e2e-tests-init-container-q4z4j deletion completed in 22.156349232s
+
+• [SLOW TEST:26.285 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should invoke init containers on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[k8s.io] Probing container 
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:53:14.938: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename container-probe
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/container_probe.go:48
+[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Probing container
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:54:15.067: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-probe-rpc8c" for this suite.
+May  6 08:54:37.085: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:54:37.129: INFO: namespace: e2e-tests-container-probe-rpc8c, resource: bindings, ignored listing per whitelist
+May  6 08:54:37.234: INFO: namespace e2e-tests-container-probe-rpc8c deletion completed in 22.162776598s
+
+• [SLOW TEST:82.296 seconds]
+[k8s.io] Probing container
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:54:37.234: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-map-93be0ffc-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:54:37.379: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-4tpqw" to be "success or failure"
+May  6 08:54:37.392: INFO: Pod "pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 12.131737ms
+May  6 08:54:39.398: INFO: Pod "pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01880769s
+STEP: Saw pod success
+May  6 08:54:39.398: INFO: Pod "pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:54:39.404: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035 container projected-secret-volume-test: 
+STEP: delete the pod
+May  6 08:54:39.448: INFO: Waiting for pod pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:54:39.452: INFO: Pod pod-projected-secrets-93be8bc6-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:54:39.452: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-4tpqw" for this suite.
+May  6 08:54:45.469: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:54:45.514: INFO: namespace: e2e-tests-projected-4tpqw, resource: bindings, ignored listing per whitelist
+May  6 08:54:45.601: INFO: namespace e2e-tests-projected-4tpqw deletion completed in 6.143702011s
+
+• [SLOW TEST:8.368 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume with mappings and Item Mode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Projected configMap 
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:54:45.605: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name projected-configmap-test-volume-98b99c53-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:54:45.740: INFO: Waiting up to 5m0s for pod "pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-pxmrz" to be "success or failure"
+May  6 08:54:45.749: INFO: Pod "pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.99611ms
+May  6 08:54:47.753: INFO: Pod "pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.01365089s
+STEP: Saw pod success
+May  6 08:54:47.754: INFO: Pod "pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:54:47.757: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035 container projected-configmap-volume-test: 
+STEP: delete the pod
+May  6 08:54:47.794: INFO: Waiting for pod pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:54:47.797: INFO: Pod pod-projected-configmaps-98ba213d-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected configMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:54:47.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-pxmrz" for this suite.
+May  6 08:54:53.815: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:54:53.874: INFO: namespace: e2e-tests-projected-pxmrz, resource: bindings, ignored listing per whitelist
+May  6 08:54:53.935: INFO: namespace e2e-tests-projected-pxmrz deletion completed in 6.134614009s
+
+• [SLOW TEST:8.330 seconds]
+[sig-storage] Projected configMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_configmap.go:34
+  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-storage] ConfigMap 
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:54:53.935: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-upd-9db4bd23-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating the pod
+STEP: Waiting for pod with text data
+STEP: Waiting for pod with binary data
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:54:56.128: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-c5fc6" for this suite.
+May  6 08:55:18.163: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:55:18.210: INFO: namespace: e2e-tests-configmap-c5fc6, resource: bindings, ignored listing per whitelist
+May  6 08:55:18.289: INFO: namespace e2e-tests-configmap-c5fc6 deletion completed in 22.150011675s
+
+• [SLOW TEST:24.353 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  binary data should be reflected in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[k8s.io] [sig-node] Pods Extended [k8s.io] Pods Set QOS Class 
+  should be submitted and removed  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:55:18.291: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/node/pods.go:204
+[It] should be submitted and removed  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+STEP: verifying QOS class is set on the pod
+[AfterEach] [k8s.io] [sig-node] Pods Extended
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:55:18.464: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-n2br9" for this suite.
+May  6 08:55:40.494: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:55:40.535: INFO: namespace: e2e-tests-pods-n2br9, resource: bindings, ignored listing per whitelist
+May  6 08:55:40.643: INFO: namespace e2e-tests-pods-n2br9 deletion completed in 22.171564441s
+
+• [SLOW TEST:22.352 seconds]
+[k8s.io] [sig-node] Pods Extended
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  [k8s.io] Pods Set QOS Class
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should be submitted and removed  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[k8s.io] InitContainer [NodeConformance] 
+  should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:55:40.643: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename init-container
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/init_container.go:43
+[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating the pod
+May  6 08:55:40.825: INFO: PodSpec: initContainers in spec.initContainers
+May  6 08:56:25.234: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-b9907109-6fdc-11e9-a235-ba138c0d9035", GenerateName:"", Namespace:"e2e-tests-init-container-8lhqv", SelfLink:"/api/v1/namespaces/e2e-tests-init-container-8lhqv/pods/pod-init-b9907109-6fdc-11e9-a235-ba138c0d9035", UID:"b99179f1-6fdc-11e9-8e1b-fa163ee16beb", ResourceVersion:"25873", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63692729740, loc:(*time.Location)(0x7b47ba0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"825018334"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:""}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-l68g6", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc0014e2a80), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-l68g6", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-l68g6", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}, "memory":resource.Quantity{i:resource.int64Amount{value:52428800, scale:0}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"52428800", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-l68g6", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil)}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc0023ca298), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"kubernetes-cluster-2696-minion-0", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc0023732c0), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration(nil), HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(nil), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc0023ca300)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692729740, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692729740, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692729740, loc:(*time.Location)(0x7b47ba0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63692729740, loc:(*time.Location)(0x7b47ba0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.0.0.19", PodIP:"10.100.112.66", StartTime:(*v1.Time)(0xc002a549c0), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc002600380)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0026003f0)}, Ready:false, RestartCount:3, Image:"busybox:1.29", ImageID:"docker-pullable://busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"docker://dfc9f0b57cc9fb48ef14f71dc43cc7ceb97c7eeea90bb159d01258dfc25a5272"}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc002a54a00), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:""}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc002a549e0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:""}}, QOSClass:"Guaranteed"}}
+[AfterEach] [k8s.io] InitContainer [NodeConformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:56:25.236: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-init-container-8lhqv" for this suite.
+May  6 08:56:47.286: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:56:47.435: INFO: namespace: e2e-tests-init-container-8lhqv, resource: bindings, ignored listing per whitelist
+May  6 08:56:47.435: INFO: namespace e2e-tests-init-container-8lhqv deletion completed in 22.192799796s
+
+• [SLOW TEST:66.793 seconds]
+[k8s.io] InitContainer [NodeConformance]
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should not start app containers if init containers fail on a RestartAlways pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSS
+------------------------------
+[sig-api-machinery] CustomResourceDefinition resources Simple CustomResourceDefinition 
+  creating/deleting custom resource definition objects works  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:56:47.437: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename custom-resource-definition
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] creating/deleting custom resource definition objects works  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:56:47.601: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+[AfterEach] [sig-api-machinery] CustomResourceDefinition resources
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:56:48.673: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-custom-resource-definition-dmdn2" for this suite.
+May  6 08:56:54.695: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:56:54.803: INFO: namespace: e2e-tests-custom-resource-definition-dmdn2, resource: bindings, ignored listing per whitelist
+May  6 08:56:54.822: INFO: namespace e2e-tests-custom-resource-definition-dmdn2 deletion completed in 6.143920087s
+
+• [SLOW TEST:7.386 seconds]
+[sig-api-machinery] CustomResourceDefinition resources
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/framework.go:22
+  Simple CustomResourceDefinition
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go:35
+    creating/deleting custom resource definition objects works  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[sig-network] Proxy version v1 
+  should proxy logs on node with explicit kubelet port using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] version v1
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:56:54.822: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename proxy
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should proxy logs on node with explicit kubelet port using proxy subresource  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:56:54.959: INFO: (0) /api/v1/nodes/kubernetes-cluster-2696-minion-0:10250/proxy/logs/: 
+anaconda/
+audit/
+btmp
+
+>>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-map-e9942bc1-6fdc-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:57:01.392: INFO: Waiting up to 5m0s for pod "pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-b5d7p" to be "success or failure"
+May  6 08:57:01.402: INFO: Pod "pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 8.960156ms
+May  6 08:57:03.406: INFO: Pod "pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.013338959s
+May  6 08:57:05.411: INFO: Pod "pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.018013445s
+STEP: Saw pod success
+May  6 08:57:05.411: INFO: Pod "pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:57:05.415: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035 container configmap-volume-test: 
+STEP: delete the pod
+May  6 08:57:05.437: INFO: Waiting for pod pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035 to disappear
+May  6 08:57:05.440: INFO: Pod pod-configmaps-e994eb92-6fdc-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:57:05.440: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-b5d7p" for this suite.
+May  6 08:57:11.460: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:57:11.562: INFO: namespace: e2e-tests-configmap-b5d7p, resource: bindings, ignored listing per whitelist
+May  6 08:57:11.597: INFO: namespace e2e-tests-configmap-b5d7p deletion completed in 6.153631045s
+
+• [SLOW TEST:10.329 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
+  should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:57:11.599: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubelet-test
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:37
+[BeforeEach] when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:81
+[It] should be possible to delete [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[AfterEach] [k8s.io] Kubelet
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:57:11.718: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubelet-test-6f5vg" for this suite.
+May  6 08:57:33.766: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:57:33.866: INFO: namespace: e2e-tests-kubelet-test-6f5vg, resource: bindings, ignored listing per whitelist
+May  6 08:57:33.882: INFO: namespace e2e-tests-kubelet-test-6f5vg deletion completed in 22.139427844s
+
+• [SLOW TEST:22.283 seconds]
+[k8s.io] Kubelet
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when scheduling a busybox command that always fails in a pod
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/kubelet.go:78
+    should be possible to delete [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-auth] ServiceAccounts 
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:57:33.885: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename svcaccounts
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: getting the auto-created API token
+STEP: Creating a pod to test consume service account token
+May  6 08:57:34.552: INFO: Waiting up to 5m0s for pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l" in namespace "e2e-tests-svcaccounts-rbv97" to be "success or failure"
+May  6 08:57:34.558: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l": Phase="Pending", Reason="", readiness=false. Elapsed: 6.214758ms
+May  6 08:57:36.568: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l": Phase="Pending", Reason="", readiness=false. Elapsed: 2.015538104s
+May  6 08:57:38.572: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.019831583s
+STEP: Saw pod success
+May  6 08:57:38.572: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l" satisfied condition "success or failure"
+May  6 08:57:38.577: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l container token-test: 
+STEP: delete the pod
+May  6 08:57:38.597: INFO: Waiting for pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l to disappear
+May  6 08:57:38.602: INFO: Pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qkb6l no longer exists
+STEP: Creating a pod to test consume service account root CA
+May  6 08:57:38.608: INFO: Waiting up to 5m0s for pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx" in namespace "e2e-tests-svcaccounts-rbv97" to be "success or failure"
+May  6 08:57:38.612: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx": Phase="Pending", Reason="", readiness=false. Elapsed: 3.426903ms
+May  6 08:57:40.617: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx": Phase="Pending", Reason="", readiness=false. Elapsed: 2.008684114s
+May  6 08:57:42.622: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.013614458s
+STEP: Saw pod success
+May  6 08:57:42.622: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx" satisfied condition "success or failure"
+May  6 08:57:42.628: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx container root-ca-test: 
+STEP: delete the pod
+May  6 08:57:42.664: INFO: Waiting for pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx to disappear
+May  6 08:57:42.668: INFO: Pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-qsmtx no longer exists
+STEP: Creating a pod to test consume service account namespace
+May  6 08:57:42.676: INFO: Waiting up to 5m0s for pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv" in namespace "e2e-tests-svcaccounts-rbv97" to be "success or failure"
+May  6 08:57:42.684: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv": Phase="Pending", Reason="", readiness=false. Elapsed: 7.833642ms
+May  6 08:57:44.690: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014332926s
+STEP: Saw pod success
+May  6 08:57:44.690: INFO: Pod "pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv" satisfied condition "success or failure"
+May  6 08:57:44.694: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv container namespace-test: 
+STEP: delete the pod
+May  6 08:57:44.744: INFO: Waiting for pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv to disappear
+May  6 08:57:44.749: INFO: Pod pod-service-account-fd58f199-6fdc-11e9-a235-ba138c0d9035-mxdvv no longer exists
+[AfterEach] [sig-auth] ServiceAccounts
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:57:44.750: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-svcaccounts-rbv97" for this suite.
+May  6 08:57:50.770: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:57:50.797: INFO: namespace: e2e-tests-svcaccounts-rbv97, resource: bindings, ignored listing per whitelist
+May  6 08:57:50.894: INFO: namespace e2e-tests-svcaccounts-rbv97 deletion completed in 6.137956793s
+
+• [SLOW TEST:17.009 seconds]
+[sig-auth] ServiceAccounts
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/auth/framework.go:22
+  should mount an API token into pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SS
+------------------------------
+[sig-storage] ConfigMap 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:57:50.896: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename configmap
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating configMap with name configmap-test-volume-072a1e00-6fdd-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume configMaps
+May  6 08:57:51.027: INFO: Waiting up to 5m0s for pod "pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-configmap-jrqf8" to be "success or failure"
+May  6 08:57:51.034: INFO: Pod "pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 6.436765ms
+May  6 08:57:53.039: INFO: Pod "pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.010782843s
+STEP: Saw pod success
+May  6 08:57:53.039: INFO: Pod "pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:57:53.043: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035 container configmap-volume-test: 
+STEP: delete the pod
+May  6 08:57:53.082: INFO: Waiting for pod pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 08:57:53.087: INFO: Pod pod-configmaps-072abe02-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] ConfigMap
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:57:53.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-configmap-jrqf8" for this suite.
+May  6 08:57:59.111: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:57:59.227: INFO: namespace: e2e-tests-configmap-jrqf8, resource: bindings, ignored listing per whitelist
+May  6 08:57:59.244: INFO: namespace e2e-tests-configmap-jrqf8 deletion completed in 6.149431177s
+
+• [SLOW TEST:8.348 seconds]
+[sig-storage] ConfigMap
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/configmap_volume.go:33
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSS
+------------------------------
+[sig-storage] Projected secret 
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:57:59.246: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename projected
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating projection with secret that has name projected-secret-test-0c2985a9-6fdd-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:57:59.425: INFO: Waiting up to 5m0s for pod "pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-projected-b6qvx" to be "success or failure"
+May  6 08:57:59.432: INFO: Pod "pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 7.726873ms
+May  6 08:58:01.439: INFO: Pod "pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.014466296s
+STEP: Saw pod success
+May  6 08:58:01.440: INFO: Pod "pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:58:01.445: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035 container projected-secret-volume-test: 
+STEP: delete the pod
+May  6 08:58:01.498: INFO: Waiting for pod pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 08:58:01.512: INFO: Pod pod-projected-secrets-0c2a3b22-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Projected secret
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:58:01.512: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-projected-b6qvx" for this suite.
+May  6 08:58:07.545: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:58:07.565: INFO: namespace: e2e-tests-projected-b6qvx, resource: bindings, ignored listing per whitelist
+May  6 08:58:07.680: INFO: namespace e2e-tests-projected-b6qvx deletion completed in 6.162092854s
+
+• [SLOW TEST:8.435 seconds]
+[sig-storage] Projected secret
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/projected_secret.go:34
+  should be consumable from pods in volume [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[k8s.io] Pods 
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:58:07.685: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename pods
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/pods.go:132
+[It] should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+May  6 08:58:07.809: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: creating the pod
+STEP: submitting the pod to kubernetes
+[AfterEach] [k8s.io] Pods
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:58:09.961: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-pods-6kwmf" for this suite.
+May  6 08:58:47.986: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:58:48.040: INFO: namespace: e2e-tests-pods-6kwmf, resource: bindings, ignored listing per whitelist
+May  6 08:58:48.102: INFO: namespace e2e-tests-pods-6kwmf deletion completed in 38.1355604s
+
+• [SLOW TEST:40.418 seconds]
+[k8s.io] Pods
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  should support remote command execution over websockets [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSS
+------------------------------
+[sig-node] Downward API 
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:58:48.106: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward api env vars
+May  6 08:58:48.223: INFO: Waiting up to 5m0s for pod "downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-62fs8" to be "success or failure"
+May  6 08:58:48.235: INFO: Pod "downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 11.752912ms
+May  6 08:58:50.240: INFO: Pod "downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016367263s
+STEP: Saw pod success
+May  6 08:58:50.240: INFO: Pod "downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:58:50.243: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035 container dapi-container: 
+STEP: delete the pod
+May  6 08:58:50.272: INFO: Waiting for pod downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 08:58:50.280: INFO: Pod downward-api-29411faa-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-node] Downward API
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:58:50.280: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-62fs8" for this suite.
+May  6 08:58:56.302: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:58:56.388: INFO: namespace: e2e-tests-downward-api-62fs8, resource: bindings, ignored listing per whitelist
+May  6 08:58:56.410: INFO: namespace e2e-tests-downward-api-62fs8 deletion completed in 6.122766924s
+
+• [SLOW TEST:8.305 seconds]
+[sig-node] Downward API
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downward_api.go:38
+  should provide host IP as an env var [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSS
+------------------------------
+[sig-storage] Secrets 
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:58:56.412: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename secrets
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating secret with name secret-test-2e34fccd-6fdd-11e9-a235-ba138c0d9035
+STEP: Creating a pod to test consume secrets
+May  6 08:58:56.534: INFO: Waiting up to 5m0s for pod "pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-secrets-cbgf8" to be "success or failure"
+May  6 08:58:56.546: INFO: Pod "pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 11.439198ms
+May  6 08:58:58.551: INFO: Pod "pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.016799257s
+STEP: Saw pod success
+May  6 08:58:58.551: INFO: Pod "pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 08:58:58.559: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035 container secret-volume-test: 
+STEP: delete the pod
+May  6 08:58:58.601: INFO: Waiting for pod pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 08:58:58.605: INFO: Pod pod-secrets-2e35ff9d-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Secrets
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:58:58.605: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-secrets-cbgf8" for this suite.
+May  6 08:59:04.626: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:59:04.694: INFO: namespace: e2e-tests-secrets-cbgf8, resource: bindings, ignored listing per whitelist
+May  6 08:59:04.762: INFO: namespace e2e-tests-secrets-cbgf8 deletion completed in 6.153533304s
+
+• [SLOW TEST:8.350 seconds]
+[sig-storage] Secrets
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/secrets_volume.go:34
+  should be consumable from pods in volume with defaultMode set [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSS
+------------------------------
+[sig-network] Services 
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:59:04.763: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename services
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:85
+[It] should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: creating service endpoint-test2 in namespace e2e-tests-services-br54l
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-br54l to expose endpoints map[]
+May  6 08:59:04.947: INFO: Get endpoints failed (9.268725ms elapsed, ignoring for 5s): endpoints "endpoint-test2" not found
+May  6 08:59:05.951: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-br54l exposes endpoints map[] (1.013853625s elapsed)
+STEP: Creating pod pod1 in namespace e2e-tests-services-br54l
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-br54l to expose endpoints map[pod1:[80]]
+May  6 08:59:07.995: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-br54l exposes endpoints map[pod1:[80]] (2.03347685s elapsed)
+STEP: Creating pod pod2 in namespace e2e-tests-services-br54l
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-br54l to expose endpoints map[pod1:[80] pod2:[80]]
+May  6 08:59:10.061: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-br54l exposes endpoints map[pod1:[80] pod2:[80]] (2.060274681s elapsed)
+STEP: Deleting pod pod1 in namespace e2e-tests-services-br54l
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-br54l to expose endpoints map[pod2:[80]]
+May  6 08:59:10.092: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-br54l exposes endpoints map[pod2:[80]] (24.814089ms elapsed)
+STEP: Deleting pod pod2 in namespace e2e-tests-services-br54l
+STEP: waiting up to 3m0s for service endpoint-test2 in namespace e2e-tests-services-br54l to expose endpoints map[]
+May  6 08:59:10.111: INFO: successfully validated that service endpoint-test2 in namespace e2e-tests-services-br54l exposes endpoints map[] (6.753914ms elapsed)
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:59:10.132: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-services-br54l" for this suite.
+May  6 08:59:32.155: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:59:32.291: INFO: namespace: e2e-tests-services-br54l, resource: bindings, ignored listing per whitelist
+May  6 08:59:32.291: INFO: namespace e2e-tests-services-br54l deletion completed in 22.151907865s
+[AfterEach] [sig-network] Services
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/service.go:90
+
+• [SLOW TEST:27.528 seconds]
+[sig-network] Services
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/network/framework.go:22
+  should serve a basic endpoint from pods  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSS
+------------------------------
+[sig-cli] Kubectl client [k8s.io] Kubectl api-versions 
+  should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:59:32.292: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename kubectl
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/kubectl.go:243
+[It] should check if v1 is in available api versions  [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: validating api versions
+May  6 08:59:32.428: INFO: Running '/usr/local/bin/kubectl --kubeconfig=/tmp/kubeconfig-307990706 api-versions'
+May  6 08:59:32.591: INFO: stderr: ""
+May  6 08:59:32.591: INFO: stdout: "admissionregistration.k8s.io/v1alpha1\nadmissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\napps/v1beta1\napps/v1beta2\nauditregistration.k8s.io/v1alpha1\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\nbatch/v2alpha1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1beta1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nnetworking.k8s.io/v1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1alpha1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1alpha1\nscheduling.k8s.io/v1beta1\nsettings.k8s.io/v1alpha1\nstorage.k8s.io/v1\nstorage.k8s.io/v1alpha1\nstorage.k8s.io/v1beta1\nv1\n"
+[AfterEach] [sig-cli] Kubectl client
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 08:59:32.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-kubectl-krcgp" for this suite.
+May  6 08:59:38.616: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 08:59:38.757: INFO: namespace: e2e-tests-kubectl-krcgp, resource: bindings, ignored listing per whitelist
+May  6 08:59:38.758: INFO: namespace e2e-tests-kubectl-krcgp deletion completed in 6.161837475s
+
+• [SLOW TEST:6.465 seconds]
+[sig-cli] Kubectl client
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/kubectl/framework.go:22
+  [k8s.io] Kubectl api-versions
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+    should check if v1 is in available api versions  [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+[sig-storage] Subpath Atomic writer volumes 
+  should support subpaths with projected pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 08:59:38.758: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename subpath
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] Atomic writer volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:38
+STEP: Setting up data
+[It] should support subpaths with projected pod [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating pod pod-subpath-test-projected-x74x
+STEP: Creating a pod to test atomic-volume-subpath
+May  6 08:59:38.927: INFO: Waiting up to 5m0s for pod "pod-subpath-test-projected-x74x" in namespace "e2e-tests-subpath-6xgph" to be "success or failure"
+May  6 08:59:38.945: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Pending", Reason="", readiness=false. Elapsed: 16.195951ms
+May  6 08:59:40.955: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Pending", Reason="", readiness=false. Elapsed: 2.025879944s
+May  6 08:59:42.960: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 4.031305468s
+May  6 08:59:44.966: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 6.036532051s
+May  6 08:59:46.970: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 8.040617251s
+May  6 08:59:48.974: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 10.04515826s
+May  6 08:59:50.997: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 12.067626722s
+May  6 08:59:53.001: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 14.071746994s
+May  6 08:59:55.007: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 16.077648616s
+May  6 08:59:57.012: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 18.083424985s
+May  6 08:59:59.018: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 20.088787607s
+May  6 09:00:01.022: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Running", Reason="", readiness=false. Elapsed: 22.093446602s
+May  6 09:00:03.069: INFO: Pod "pod-subpath-test-projected-x74x": Phase="Succeeded", Reason="", readiness=false. Elapsed: 24.139898211s
+STEP: Saw pod success
+May  6 09:00:03.069: INFO: Pod "pod-subpath-test-projected-x74x" satisfied condition "success or failure"
+May  6 09:00:03.093: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-subpath-test-projected-x74x container test-container-subpath-projected-x74x: 
+STEP: delete the pod
+May  6 09:00:03.155: INFO: Waiting for pod pod-subpath-test-projected-x74x to disappear
+May  6 09:00:03.176: INFO: Pod pod-subpath-test-projected-x74x no longer exists
+STEP: Deleting pod pod-subpath-test-projected-x74x
+May  6 09:00:03.176: INFO: Deleting pod "pod-subpath-test-projected-x74x" in namespace "e2e-tests-subpath-6xgph"
+[AfterEach] [sig-storage] Subpath
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 09:00:03.181: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-subpath-6xgph" for this suite.
+May  6 09:00:09.238: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 09:00:09.472: INFO: namespace: e2e-tests-subpath-6xgph, resource: bindings, ignored listing per whitelist
+May  6 09:00:09.498: INFO: namespace e2e-tests-subpath-6xgph deletion completed in 6.312639729s
+
+• [SLOW TEST:30.741 seconds]
+[sig-storage] Subpath
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/utils/framework.go:22
+  Atomic writer volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/storage/subpath.go:34
+    should support subpaths with projected pod [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+S
+------------------------------
+[k8s.io] Container Lifecycle Hook when create a pod with lifecycle hook 
+  should execute poststart http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 09:00:09.500: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename container-lifecycle-hook
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] when create a pod with lifecycle hook
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:61
+STEP: create the container to handle the HTTPGet hook request.
+[It] should execute poststart http hook properly [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: create the pod with lifecycle hook
+STEP: check poststart hook
+STEP: delete the pod with lifecycle hook
+May  6 09:00:17.879: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+May  6 09:00:17.886: INFO: Pod pod-with-poststart-http-hook still exists
+May  6 09:00:19.886: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+May  6 09:00:19.892: INFO: Pod pod-with-poststart-http-hook still exists
+May  6 09:00:21.889: INFO: Waiting for pod pod-with-poststart-http-hook to disappear
+May  6 09:00:21.894: INFO: Pod pod-with-poststart-http-hook no longer exists
+[AfterEach] [k8s.io] Container Lifecycle Hook
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 09:00:21.894: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-container-lifecycle-hook-krlt8" for this suite.
+May  6 09:00:43.919: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 09:00:44.087: INFO: namespace: e2e-tests-container-lifecycle-hook-krlt8, resource: bindings, ignored listing per whitelist
+May  6 09:00:44.108: INFO: namespace e2e-tests-container-lifecycle-hook-krlt8 deletion completed in 22.209916769s
+
+• [SLOW TEST:34.609 seconds]
+[k8s.io] Container Lifecycle Hook
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:694
+  when create a pod with lifecycle hook
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/lifecycle_hook.go:40
+    should execute poststart http hook properly [NodeConformance] [Conformance]
+    /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] Downward API volume 
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 09:00:44.110: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename downward-api
+STEP: Waiting for a default service account to be provisioned in namespace
+[BeforeEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:39
+[It] should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test downward API volume plugin
+May  6 09:00:44.261: INFO: Waiting up to 5m0s for pod "downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-downward-api-j6wn8" to be "success or failure"
+May  6 09:00:44.265: INFO: Pod "downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 3.033914ms
+May  6 09:00:46.274: INFO: Pod "downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.012129003s
+STEP: Saw pod success
+May  6 09:00:46.274: INFO: Pod "downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 09:00:46.277: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035 container client-container: 
+STEP: delete the pod
+May  6 09:00:46.312: INFO: Waiting for pod downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 09:00:46.318: INFO: Pod downwardapi-volume-6e6bc4a7-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] Downward API volume
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 09:00:46.318: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-downward-api-j6wn8" for this suite.
+May  6 09:00:52.353: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 09:00:52.546: INFO: namespace: e2e-tests-downward-api-j6wn8, resource: bindings, ignored listing per whitelist
+May  6 09:00:52.559: INFO: namespace e2e-tests-downward-api-j6wn8 deletion completed in 6.235499997s
+
+• [SLOW TEST:8.449 seconds]
+[sig-storage] Downward API volume
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go:34
+  should set DefaultMode on files [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SSSSSSSSSSSSSSSSSS
+------------------------------
+[sig-storage] EmptyDir volumes 
+  should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+[BeforeEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:153
+STEP: Creating a kubernetes client
+May  6 09:00:52.560: INFO: >>> kubeConfig: /tmp/kubeconfig-307990706
+STEP: Building a namespace api object, basename emptydir
+STEP: Waiting for a default service account to be provisioned in namespace
+[It] should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+STEP: Creating a pod to test emptydir 0777 on node default medium
+May  6 09:00:52.709: INFO: Waiting up to 5m0s for pod "pod-7373eb90-6fdd-11e9-a235-ba138c0d9035" in namespace "e2e-tests-emptydir-jmbvs" to be "success or failure"
+May  6 09:00:52.740: INFO: Pod "pod-7373eb90-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 31.242543ms
+May  6 09:00:54.745: INFO: Pod "pod-7373eb90-6fdd-11e9-a235-ba138c0d9035": Phase="Pending", Reason="", readiness=false. Elapsed: 2.036301796s
+May  6 09:00:56.750: INFO: Pod "pod-7373eb90-6fdd-11e9-a235-ba138c0d9035": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.041612328s
+STEP: Saw pod success
+May  6 09:00:56.750: INFO: Pod "pod-7373eb90-6fdd-11e9-a235-ba138c0d9035" satisfied condition "success or failure"
+May  6 09:00:56.754: INFO: Trying to get logs from node kubernetes-cluster-2696-minion-0 pod pod-7373eb90-6fdd-11e9-a235-ba138c0d9035 container test-container: 
+STEP: delete the pod
+May  6 09:00:56.783: INFO: Waiting for pod pod-7373eb90-6fdd-11e9-a235-ba138c0d9035 to disappear
+May  6 09:00:56.804: INFO: Pod pod-7373eb90-6fdd-11e9-a235-ba138c0d9035 no longer exists
+[AfterEach] [sig-storage] EmptyDir volumes
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:154
+May  6 09:00:56.805: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
+STEP: Destroying namespace "e2e-tests-emptydir-jmbvs" for this suite.
+May  6 09:01:02.825: INFO: Waiting up to 30s for server preferred namespaced resources to be successfully discovered
+May  6 09:01:02.892: INFO: namespace: e2e-tests-emptydir-jmbvs, resource: bindings, ignored listing per whitelist
+May  6 09:01:03.009: INFO: namespace e2e-tests-emptydir-jmbvs deletion completed in 6.19860168s
+
+• [SLOW TEST:10.449 seconds]
+[sig-storage] EmptyDir volumes
+/workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/common/empty_dir.go:40
+  should support (non-root,0777,default) [NodeConformance] [Conformance]
+  /workspace/anago-v1.13.3-beta.0.37+721bfa751924da/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:699
+------------------------------
+SMay  6 09:01:03.009: INFO: Running AfterSuite actions on all nodes
+May  6 09:01:03.009: INFO: Running AfterSuite actions on node 1
+May  6 09:01:03.009: INFO: Skipping dumping logs from cluster
+
+Ran 200 of 2161 Specs in 5621.536 seconds
+SUCCESS! -- 200 Passed | 0 Failed | 0 Pending | 1961 Skipped PASS
+
+Ginkgo ran 1 suite in 1h33m42.727400201s
+Test Suite Passed
diff --git a/v1.13/mcs/junit_01.xml b/v1.13/mcs/junit_01.xml
new file mode 100644
index 0000000000..0943bcaa72
--- /dev/null
+++ b/v1.13/mcs/junit_01.xml
@@ -0,0 +1,6086 @@
+[junit_01.xml: JUnit XML test report produced by the conformance run; the 6,086 lines of XML element markup are not reproduced here]
\ No newline at end of file